2024-11-28 00:39:19,487 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-11-28 00:39:19,503 main DEBUG Took 0.012240 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-28 00:39:19,503 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-28 00:39:19,504 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-28 00:39:19,505 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-28 00:39:19,506 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,516 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-28 00:39:19,546 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,559 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,560 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,561 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,561 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,562 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,564 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,565 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,566 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,567 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,568 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,569 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,570 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,570 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,571 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,572 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,572 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,573 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,574 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,574 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,575 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,576 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,577 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,577 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-28 00:39:19,578 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,578 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-28 00:39:19,581 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-28 00:39:19,590 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-28 00:39:19,593 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-28 00:39:19,594 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
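The LoggerConfig builder calls above come from the log4j2.properties bundled in the hbase-logging test jar: each one pins a per-package level (for example org.apache.zookeeper=ERROR, org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG) under a root logger of INFO routed to the Console appender. As a rough illustration only, and not part of this test run, equivalent levels could be applied programmatically with Log4j2's Configurator; the class and method names below are the standard log4j-core API, while the surrounding class is a made-up sketch.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class LevelSketch {
  public static void main(String[] args) {
    // Mirror a few of the per-package levels created by the configuration above.
    Configurator.setRootLevel(Level.INFO);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
  }
}
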
2024-11-28 00:39:19,596 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-28 00:39:19,598 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-28 00:39:19,608 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-28 00:39:19,612 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-28 00:39:19,616 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-28 00:39:19,618 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-28 00:39:19,619 main DEBUG createAppenders(={Console})
2024-11-28 00:39:19,620 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized
2024-11-28 00:39:19,621 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-11-28 00:39:19,623 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK.
2024-11-28 00:39:19,624 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-28 00:39:19,625 main DEBUG OutputStream closed
2024-11-28 00:39:19,627 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-28 00:39:19,628 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-28 00:39:19,629 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK
2024-11-28 00:39:19,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-28 00:39:19,777 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-28 00:39:19,779 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-28 00:39:19,780 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-28 00:39:19,782 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-28 00:39:19,787 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-28 00:39:19,787 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-28 00:39:19,788 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-28 00:39:19,788 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-28 00:39:19,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-28 00:39:19,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-28 00:39:19,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-28 00:39:19,790 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-28 00:39:19,790 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-28 00:39:19,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-28 00:39:19,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-28 00:39:19,792 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-28 00:39:19,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-28 00:39:19,798 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-28 00:39:19,799 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null
2024-11-28 00:39:19,799 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-28 00:39:19,801 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK.
2024-11-28T00:39:19,840 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-11-28 00:39:19,844 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-28 00:39:19,845 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-28T00:39:20,535 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3
2024-11-28T00:39:20,535 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins
2024-11-28T00:39:20,536 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins
2024-11-28T00:39:20,597 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform...
using builtin-java classes where applicable 2024-11-28T00:39:20,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-28T00:39:20,838 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e, deleteOnExit=true 2024-11-28T00:39:20,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-28T00:39:20,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/test.cache.data in system properties and HBase conf 2024-11-28T00:39:20,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T00:39:20,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir in system properties and HBase conf 2024-11-28T00:39:20,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T00:39:20,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T00:39:20,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-28T00:39:20,943 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-28T00:39:20,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:39:20,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:39:20,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T00:39:20,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:39:21,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T00:39:21,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T00:39:21,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:39:21,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:39:21,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T00:39:21,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/nfs.dump.dir in system properties and HBase conf 2024-11-28T00:39:21,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir in system properties and HBase conf 2024-11-28T00:39:21,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:39:21,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-28T00:39:21,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T00:39:22,486 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-28T00:39:22,626 INFO [Time-limited test {}] log.Log(170): Logging initialized @4436ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-28T00:39:22,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:22,886 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:22,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:22,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:22,920 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:22,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:22,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:22,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:23,163 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5996a1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-36369-hadoop-hdfs-3_4_1-tests_jar-_-any-8891268833118170470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T00:39:23,178 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369} 2024-11-28T00:39:23,178 INFO [Time-limited test {}] server.Server(415): Started @4990ms 2024-11-28T00:39:23,846 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:23,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:23,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:23,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:23,866 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:23,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:23,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:24,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64986ec5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-36565-hadoop-hdfs-3_4_1-tests_jar-_-any-7003813152021409519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:39:24,018 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565} 2024-11-28T00:39:24,019 INFO [Time-limited test {}] server.Server(415): Started @5830ms 2024-11-28T00:39:24,106 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T00:39:24,268 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:24,276 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:24,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:24,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:24,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:24,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:24,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:24,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b9a68a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-43333-hadoop-hdfs-3_4_1-tests_jar-_-any-17399681348193548887/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:39:24,456 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333} 2024-11-28T00:39:24,457 INFO [Time-limited test {}] server.Server(415): Started @6269ms 2024-11-28T00:39:24,468 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T00:39:24,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:24,577 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:24,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:24,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:24,586 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:24,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:24,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:24,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@367f3488{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-41329-hadoop-hdfs-3_4_1-tests_jar-_-any-5282992821461564599/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:39:24,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329} 2024-11-28T00:39:24,735 INFO [Time-limited test {}] server.Server(415): Started @6547ms 2024-11-28T00:39:24,738 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
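At this point the NameNode and all three DataNode Jetty endpoints are up; everything from "Starting up minicluster" onward is driven by the single HBaseTestingUtil call logged earlier with StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1, ...}. A minimal sketch of that call, assuming the HBase 3.x test classpath (hbase-testing-util); the builder method names follow the fields printed in that option string and may differ slightly between versions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // values match the option logged above
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // brings up DFS, ZooKeeper and HBase, as in this log
    try {
      // ... test code would run against the mini cluster here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
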
2024-11-28T00:39:26,671 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,671 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,672 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,673 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,701 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,705 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847/current, will proceed with Du for space computation calculation, 2024-11-28T00:39:26,752 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T00:39:26,753 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T00:39:26,759 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-28T00:39:26,811 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5c8bcf6b52875a8 with lease ID 0xd49871652b7ac568: Processing first storage report for DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3 from datanode DatanodeRegistration(127.0.0.1:45777, datanodeUuid=82520e50-734f-4a6f-ad48-6d1a83946010, infoPort=33685, infoSecurePort=0, ipcPort=39661, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5c8bcf6b52875a8 with lease ID 0xd49871652b7ac568: from storage DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3 node DatanodeRegistration(127.0.0.1:45777, datanodeUuid=82520e50-734f-4a6f-ad48-6d1a83946010, infoPort=33685, infoSecurePort=0, ipcPort=39661, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x181a3e0ede4725c8 with lease ID 0xd49871652b7ac56a: Processing first storage report for DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2 from datanode DatanodeRegistration(127.0.0.1:38327, datanodeUuid=04e962cb-71ed-4ca5-b582-c86dc7adf5d6, infoPort=42575, infoSecurePort=0, ipcPort=39593, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x181a3e0ede4725c8 with lease ID 0xd49871652b7ac56a: from storage DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2 node DatanodeRegistration(127.0.0.1:38327, datanodeUuid=04e962cb-71ed-4ca5-b582-c86dc7adf5d6, infoPort=42575, infoSecurePort=0, ipcPort=39593, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4e0e908d08ea5ba with lease ID 0xd49871652b7ac569: Processing first storage report for DS-9a7ec647-91e4-4d14-be05-a67d2857cf91 from datanode DatanodeRegistration(127.0.0.1:46245, datanodeUuid=cc7e1181-0799-4a46-b365-fb23278a2771, infoPort=40703, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4e0e908d08ea5ba with lease ID 0xd49871652b7ac569: from storage DS-9a7ec647-91e4-4d14-be05-a67d2857cf91 node DatanodeRegistration(127.0.0.1:46245, datanodeUuid=cc7e1181-0799-4a46-b365-fb23278a2771, infoPort=40703, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5c8bcf6b52875a8 with lease ID 0xd49871652b7ac568: Processing first storage report for DS-a6fb34b3-f6c8-48a9-b24e-00a30de2ff62 from datanode DatanodeRegistration(127.0.0.1:45777, datanodeUuid=82520e50-734f-4a6f-ad48-6d1a83946010, infoPort=33685, infoSecurePort=0, ipcPort=39661, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf5c8bcf6b52875a8 with lease ID 0xd49871652b7ac568: from storage DS-a6fb34b3-f6c8-48a9-b24e-00a30de2ff62 node DatanodeRegistration(127.0.0.1:45777, datanodeUuid=82520e50-734f-4a6f-ad48-6d1a83946010, infoPort=33685, infoSecurePort=0, ipcPort=39661, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x181a3e0ede4725c8 with lease ID 0xd49871652b7ac56a: Processing first storage report for DS-bc26f365-21cc-4ccc-8495-660c9cdefc04 from datanode DatanodeRegistration(127.0.0.1:38327, datanodeUuid=04e962cb-71ed-4ca5-b582-c86dc7adf5d6, infoPort=42575, infoSecurePort=0, ipcPort=39593, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x181a3e0ede4725c8 with lease ID 0xd49871652b7ac56a: from storage DS-bc26f365-21cc-4ccc-8495-660c9cdefc04 node DatanodeRegistration(127.0.0.1:38327, datanodeUuid=04e962cb-71ed-4ca5-b582-c86dc7adf5d6, infoPort=42575, infoSecurePort=0, ipcPort=39593, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4e0e908d08ea5ba with lease ID 0xd49871652b7ac569: Processing first storage report for DS-b02411ed-6165-482a-997b-74ab5d50ea57 from datanode DatanodeRegistration(127.0.0.1:46245, datanodeUuid=cc7e1181-0799-4a46-b365-fb23278a2771, infoPort=40703, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847) 2024-11-28T00:39:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4e0e908d08ea5ba with lease ID 0xd49871652b7ac569: from storage DS-b02411ed-6165-482a-997b-74ab5d50ea57 node DatanodeRegistration(127.0.0.1:46245, datanodeUuid=cc7e1181-0799-4a46-b365-fb23278a2771, infoPort=40703, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1015941980;c=1732754361847), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:39:26,883 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3 2024-11-28T00:39:27,105 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/zookeeper_0, clientPort=56850, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 
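The mini ZooKeeper cluster started above listens on clientPort=56850 for this run. As an illustrative sketch only, not part of the test, a plain ZooKeeper client could confirm the quorum is reachable; the port is specific to this run, and the /hbase base znode is only populated once the master comes up later in the log.

import org.apache.zookeeper.ZooKeeper;

public class ZkProbeSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZooKeeper started above (client port taken from this run).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56850", 30000, event -> { });
    try {
      System.out.println("/hbase present: " + (zk.exists("/hbase", false) != null));
    } finally {
      zk.close();
    }
  }
}
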
2024-11-28T00:39:27,120 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56850 2024-11-28T00:39:27,135 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:27,138 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:39:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:39:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:39:27,797 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e with version=8 2024-11-28T00:39:27,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/hbase-staging 2024-11-28T00:39:27,908 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-28T00:39:28,181 INFO [Time-limited test {}] client.ConnectionUtils(128): master/97f25515df69:0 server-side Connection retries=45 2024-11-28T00:39:28,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,200 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:39:28,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,201 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:39:28,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-28T00:39:28,448 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-28T00:39:28,461 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-28T00:39:28,467 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:39:28,502 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 118587 (auto-detected) 2024-11-28T00:39:28,504 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-28T00:39:28,530 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43227 2024-11-28T00:39:28,565 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43227 connecting to ZooKeeper ensemble=127.0.0.1:56850 2024-11-28T00:39:28,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:432270x0, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:39:28,685 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43227-0x1017ee15dea0000 connected 2024-11-28T00:39:28,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:28,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:28,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:39:28,814 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e, hbase.cluster.distributed=false 2024-11-28T00:39:28,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:39:28,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43227 2024-11-28T00:39:28,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43227 2024-11-28T00:39:28,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43227 2024-11-28T00:39:28,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43227 2024-11-28T00:39:28,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43227 2024-11-28T00:39:28,979 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/97f25515df69:0 server-side Connection retries=45 2024-11-28T00:39:28,981 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,982 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): 
priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:39:28,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:28,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:39:28,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:39:28,989 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:39:28,990 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38585 2024-11-28T00:39:28,993 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38585 connecting to ZooKeeper ensemble=127.0.0.1:56850 2024-11-28T00:39:28,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,003 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385850x0, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:39:29,027 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38585-0x1017ee15dea0001 connected 2024-11-28T00:39:29,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:39:29,033 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:39:29,040 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:39:29,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:39:29,050 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:39:29,052 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38585 2024-11-28T00:39:29,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38585 2024-11-28T00:39:29,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38585 2024-11-28T00:39:29,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38585 2024-11-28T00:39:29,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38585 2024-11-28T00:39:29,084 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/97f25515df69:0 server-side Connection retries=45 2024-11-28T00:39:29,085 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,085 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,085 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:39:29,085 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,086 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:39:29,086 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:39:29,086 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:39:29,088 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43795 2024-11-28T00:39:29,090 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43795 connecting to ZooKeeper ensemble=127.0.0.1:56850 2024-11-28T00:39:29,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437950x0, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:39:29,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43795-0x1017ee15dea0002 connected 2024-11-28T00:39:29,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:39:29,122 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:39:29,122 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:39:29,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:39:29,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:39:29,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43795 2024-11-28T00:39:29,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43795 2024-11-28T00:39:29,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43795 2024-11-28T00:39:29,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43795 2024-11-28T00:39:29,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43795 2024-11-28T00:39:29,169 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/97f25515df69:0 server-side Connection retries=45 2024-11-28T00:39:29,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,170 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:39:29,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:39:29,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:39:29,171 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:39:29,171 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:39:29,172 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34347 2024-11-28T00:39:29,175 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34347 connecting to ZooKeeper ensemble=127.0.0.1:56850 2024-11-28T00:39:29,177 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,198 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:343470x0, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:39:29,199 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:343470x0, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:39:29,200 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:39:29,200 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34347-0x1017ee15dea0003 connected 2024-11-28T00:39:29,201 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:39:29,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:39:29,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:39:29,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34347 2024-11-28T00:39:29,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34347 2024-11-28T00:39:29,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34347 2024-11-28T00:39:29,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34347 2024-11-28T00:39:29,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34347 2024-11-28T00:39:29,233 DEBUG [M:0;97f25515df69:43227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;97f25515df69:43227 2024-11-28T00:39:29,234 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/97f25515df69,43227,1732754367980 2024-11-28T00:39:29,251 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,255 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on 
existing znode=/hbase/backup-masters/97f25515df69,43227,1732754367980 2024-11-28T00:39:29,287 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:39:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:39:29,287 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:39:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,291 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T00:39:29,292 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/97f25515df69,43227,1732754367980 from backup master directory 2024-11-28T00:39:29,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/97f25515df69,43227,1732754367980 2024-11-28T00:39:29,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,303 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:39:29,304 WARN [master/97f25515df69:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:39:29,305 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=97f25515df69,43227,1732754367980 2024-11-28T00:39:29,307 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-28T00:39:29,308 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-28T00:39:29,369 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/hbase.id] with ID: 8ae27c25-4edf-4df0-ab8c-1da49524c9af 2024-11-28T00:39:29,369 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.tmp/hbase.id 2024-11-28T00:39:29,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:39:29,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:39:29,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:39:29,389 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.tmp/hbase.id]:[hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/hbase.id] 2024-11-28T00:39:29,456 INFO [master/97f25515df69:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:29,464 INFO [master/97f25515df69:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-28T00:39:29,491 INFO [master/97f25515df69:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 
2024-11-28T00:39:29,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,503 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:29,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:39:29,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:39:29,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:39:29,554 INFO [master/97f25515df69:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:39:29,557 INFO [master/97f25515df69:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-28T00:39:29,579 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T00:39:29,585 INFO [master/97f25515df69:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:39:29,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:39:29,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:39:29,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:39:29,649 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/data/master/store 2024-11-28T00:39:29,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:39:29,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:39:29,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:39:29,687 INFO [master/97f25515df69:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-28T00:39:29,691 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:29,693 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T00:39:29,693 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:39:29,693 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:39:29,695 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T00:39:29,695 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:39:29,695 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:39:29,697 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732754369693Disabling compacts and flushes for region at 1732754369693Disabling writes for close at 1732754369695 (+2 ms)Writing region close event to WAL at 1732754369695Closed at 1732754369695 2024-11-28T00:39:29,699 WARN [master/97f25515df69:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/data/master/store/.initializing 2024-11-28T00:39:29,699 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980 2024-11-28T00:39:29,710 INFO [master/97f25515df69:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:39:29,726 INFO [master/97f25515df69:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C43227%2C1732754367980, suffix=, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/oldWALs, maxLogs=10 2024-11-28T00:39:29,757 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730, exclude list is [], retry=0 2024-11-28T00:39:29,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 
2024-11-28T00:39:29,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:39:29,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:39:29,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-28T00:39:29,822 INFO [master/97f25515df69:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 2024-11-28T00:39:29,824 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40703:40703),(127.0.0.1/127.0.0.1:33685:33685),(127.0.0.1/127.0.0.1:42575:42575)] 2024-11-28T00:39:29,824 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:39:29,825 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:29,829 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,830 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-28T00:39:29,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:29,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:29,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-28T00:39:29,918 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:29,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:29,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-28T00:39:29,923 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:29,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:29,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-28T00:39:29,928 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:29,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:29,930 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,934 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,936 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,952 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,953 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,957 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-28T00:39:29,963 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:39:29,982 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:29,983 INFO [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68605031, jitterRate=0.022294625639915466}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:39:29,994 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732754369843Initializing all the Stores at 1732754369846 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754369847 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754369848 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754369848Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754369849 (+1 ms)Cleaning up temporary data from old regions at 1732754369953 (+104 ms)Region opened successfully at 1732754369993 (+40 ms) 2024-11-28T00:39:29,995 INFO [master/97f25515df69:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-28T00:39:30,054 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b59a53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=97f25515df69/172.17.0.3:0 2024-11-28T00:39:30,091 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-28T00:39:30,104 INFO [master/97f25515df69:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-28T00:39:30,104 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-28T00:39:30,107 INFO [master/97f25515df69:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-28T00:39:30,109 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-28T00:39:30,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-28T00:39:30,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-28T00:39:30,160 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-28T00:39:30,174 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-28T00:39:30,220 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-28T00:39:30,224 INFO [master/97f25515df69:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-28T00:39:30,226 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-28T00:39:30,250 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-28T00:39:30,253 INFO [master/97f25515df69:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-28T00:39:30,287 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-28T00:39:30,297 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-28T00:39:30,299 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-28T00:39:30,307 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-28T00:39:30,331 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-28T00:39:30,339 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-28T00:39:30,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:39:30,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:39:30,355 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:39:30,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,356 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:39:30,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,360 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=97f25515df69,43227,1732754367980, sessionid=0x1017ee15dea0000, setting cluster-up flag (Was=false) 2024-11-28T00:39:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,392 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-28T00:39:30,423 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-28T00:39:30,426 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=97f25515df69,43227,1732754367980 2024-11-28T00:39:30,450 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:30,492 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-28T00:39:30,494 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=97f25515df69,43227,1732754367980 2024-11-28T00:39:30,501 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-28T00:39:30,525 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(746): ClusterId : 8ae27c25-4edf-4df0-ab8c-1da49524c9af 2024-11-28T00:39:30,526 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(746): ClusterId : 8ae27c25-4edf-4df0-ab8c-1da49524c9af 2024-11-28T00:39:30,531 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(746): ClusterId : 8ae27c25-4edf-4df0-ab8c-1da49524c9af 2024-11-28T00:39:30,532 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:39:30,532 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:39:30,532 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:39:30,541 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-28T00:39:30,546 INFO [master/97f25515df69:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:39:30,546 INFO [master/97f25515df69:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-28T00:39:30,558 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:39:30,559 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:39:30,559 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:39:30,559 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:39:30,621 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-28T00:39:30,634 INFO [master/97f25515df69:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-28T00:39:30,646 INFO [master/97f25515df69:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-28T00:39:30,655 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:39:30,655 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:39:30,653 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 97f25515df69,43227,1732754367980 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-28T00:39:30,657 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:39:30,658 DEBUG [RS:2;97f25515df69:34347 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@854cc5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=97f25515df69/172.17.0.3:0 2024-11-28T00:39:30,667 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:39:30,667 DEBUG [RS:1;97f25515df69:43795 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a0d11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=97f25515df69/172.17.0.3:0 2024-11-28T00:39:30,668 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:39:30,669 DEBUG [RS:0;97f25515df69:38585 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e623aad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=97f25515df69/172.17.0.3:0 2024-11-28T00:39:30,670 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/97f25515df69:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:39:30,670 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/97f25515df69:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:39:30,671 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/97f25515df69:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:39:30,671 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/97f25515df69:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:39:30,671 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/97f25515df69:0, corePoolSize=10, maxPoolSize=10 2024-11-28T00:39:30,671 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,671 DEBUG 
[master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/97f25515df69:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:39:30,672 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,681 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;97f25515df69:34347 2024-11-28T00:39:30,687 INFO [RS:2;97f25515df69:34347 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:39:30,688 INFO [RS:2;97f25515df69:34347 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:39:30,688 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:39:30,689 INFO [RS:2;97f25515df69:34347 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:30,689 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-28T00:39:30,692 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(2659): reportForDuty to master=97f25515df69,43227,1732754367980 with port=34347, startcode=1732754369169 2024-11-28T00:39:30,693 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;97f25515df69:43795 2024-11-28T00:39:30,693 INFO [RS:1;97f25515df69:43795 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:39:30,693 INFO [RS:1;97f25515df69:43795 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:39:30,693 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:39:30,694 INFO [RS:1;97f25515df69:43795 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:30,694 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-28T00:39:30,694 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;97f25515df69:38585 2024-11-28T00:39:30,694 INFO [RS:0;97f25515df69:38585 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:39:30,694 INFO [RS:0;97f25515df69:38585 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:39:30,694 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:39:30,695 INFO [RS:0;97f25515df69:38585 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:30,695 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-28T00:39:30,695 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(2659): reportForDuty to master=97f25515df69,43227,1732754367980 with port=43795, startcode=1732754369084 2024-11-28T00:39:30,699 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(2659): reportForDuty to master=97f25515df69,43227,1732754367980 with port=38585, startcode=1732754368938 2024-11-28T00:39:30,702 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:39:30,703 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-28T00:39:30,712 DEBUG [RS:0;97f25515df69:38585 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:39:30,712 DEBUG [RS:1;97f25515df69:43795 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:39:30,712 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:30,712 DEBUG [RS:2;97f25515df69:34347 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:39:30,712 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T00:39:30,733 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732754400733 2024-11-28T00:39:30,735 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-28T00:39:30,737 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-28T00:39:30,742 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-28T00:39:30,742 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-28T00:39:30,743 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-28T00:39:30,743 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-28T00:39:30,762 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,771 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-28T00:39:30,772 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-28T00:39:30,772 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-28T00:39:30,779 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-28T00:39:30,780 INFO [master/97f25515df69:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-28T00:39:30,781 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45717, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:39:30,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:39:30,781 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38065, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:39:30,782 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52019, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:39:30,783 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/97f25515df69:0:becomeActiveMaster-HFileCleaner.large.0-1732754370782,5,FailOnTimeoutGroup] 2024-11-28T00:39:30,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:39:30,784 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/97f25515df69:0:becomeActiveMaster-HFileCleaner.small.0-1732754370783,5,FailOnTimeoutGroup] 2024-11-28T00:39:30,784 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
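Note: the cleaner chores initialized above (log cleaners, HFile cleaners, and their 600000 ms schedules) are controlled by master configuration. A hedged sketch of the usual keys; the key names are standard HBase settings, and the plugin lists below only echo classes named in the log, not the test's full configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfig {
      public static Configuration cleanerTuning() {
        Configuration conf = HBaseConfiguration.create();
        // How often the LogsCleaner/HFileCleaner chores run (ms); the log shows period=600000.
        conf.setInt("hbase.master.cleaner.interval", 600000);
        // Minimum age before old WALs / HFiles are deleted (ms); values here are illustrative.
        conf.setLong("hbase.master.logcleaner.ttl", 600000L);
        conf.setLong("hbase.master.hfilecleaner.ttl", 300000L);
        // Plugin chains corresponding to the cleaner classes listed above.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
          + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
          + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        return conf;
      }
    }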
2024-11-28T00:39:30,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:39:30,785 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-28T00:39:30,786 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,786 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,786 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-28T00:39:30,787 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:30,789 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 97f25515df69,34347,1732754369169 2024-11-28T00:39:30,792 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] master.ServerManager(517): Registering regionserver=97f25515df69,34347,1732754369169 2024-11-28T00:39:30,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 97f25515df69,38585,1732754368938 2024-11-28T00:39:30,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] 
master.ServerManager(517): Registering regionserver=97f25515df69,38585,1732754368938 2024-11-28T00:39:30,810 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:30,810 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44423 2024-11-28T00:39:30,810 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:39:30,812 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 97f25515df69,43795,1732754369084 2024-11-28T00:39:30,812 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:30,812 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44423 2024-11-28T00:39:30,813 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:39:30,813 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] master.ServerManager(517): Registering regionserver=97f25515df69,43795,1732754369084 2024-11-28T00:39:30,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:39:30,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:39:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:39:30,818 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:30,818 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44423 2024-11-28T00:39:30,818 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:39:30,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:30,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T00:39:30,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T00:39:30,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T00:39:30,832 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:30,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:30,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-28T00:39:30,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-28T00:39:30,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:30,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:30,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T00:39:30,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T00:39:30,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:30,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:30,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T00:39:30,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T00:39:30,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:30,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:30,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-28T00:39:30,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:39:30,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:39:30,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-28T00:39:30,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-28T00:39:30,855 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
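Note: the store-opening lines above repeat the column-family attributes of the hbase:meta descriptor (ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions). An equivalent family can be expressed with the public descriptor builders; this is a sketch of a comparable definition, not the internal FSTableDescriptors path the master itself uses:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
      /** Mirrors the 'info' family attributes printed in the table descriptor above. */
      public static ColumnFamilyDescriptor infoLikeFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlockCacheEnabled(true)
            .build();
      }
    }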
2024-11-28T00:39:30,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-28T00:39:30,861 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:30,862 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73245775, jitterRate=0.09144710004329681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:39:30,866 DEBUG [RS:2;97f25515df69:34347 {}] zookeeper.ZKUtil(111): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/97f25515df69,34347,1732754369169 2024-11-28T00:39:30,867 WARN [RS:2;97f25515df69:34347 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:39:30,867 INFO [RS:2;97f25515df69:34347 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:39:30,867 DEBUG [RS:0;97f25515df69:38585 {}] zookeeper.ZKUtil(111): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/97f25515df69,38585,1732754368938 2024-11-28T00:39:30,867 WARN [RS:0;97f25515df69:38585 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:39:30,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732754370821Initializing all the Stores at 1732754370824 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754370824Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754370828 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754370828Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754370828Cleaning up temporary data from old regions at 1732754370854 (+26 ms)Region opened successfully 
at 1732754370867 (+13 ms) 2024-11-28T00:39:30,867 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,34347,1732754369169 2024-11-28T00:39:30,867 INFO [RS:0;97f25515df69:38585 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:39:30,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-28T00:39:30,868 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938 2024-11-28T00:39:30,869 DEBUG [RS:1;97f25515df69:43795 {}] zookeeper.ZKUtil(111): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/97f25515df69,43795,1732754369084 2024-11-28T00:39:30,869 WARN [RS:1;97f25515df69:43795 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:39:30,869 INFO [RS:1;97f25515df69:43795 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:39:30,868 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-28T00:39:30,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-28T00:39:30,869 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084 2024-11-28T00:39:30,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T00:39:30,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T00:39:30,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [97f25515df69,34347,1732754369169] 2024-11-28T00:39:30,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [97f25515df69,38585,1732754368938] 2024-11-28T00:39:30,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [97f25515df69,43795,1732754369084] 2024-11-28T00:39:30,874 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-28T00:39:30,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732754370867Disabling compacts and flushes for region at 1732754370867Disabling writes for close at 1732754370869 (+2 ms)Writing region close event to WAL at 1732754370873 (+4 ms)Closed at 1732754370873 2024-11-28T00:39:30,880 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:39:30,880 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-28T00:39:30,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-28T00:39:30,900 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T00:39:30,907 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-28T00:39:30,912 INFO [RS:0;97f25515df69:38585 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:39:30,912 INFO [RS:2;97f25515df69:34347 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:39:30,913 INFO [RS:1;97f25515df69:43795 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:39:30,930 INFO [RS:1;97f25515df69:43795 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:39:30,930 INFO [RS:2;97f25515df69:34347 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:39:30,930 INFO [RS:0;97f25515df69:38585 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:39:30,936 INFO [RS:0;97f25515df69:38585 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:39:30,936 INFO [RS:2;97f25515df69:34347 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:39:30,937 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,937 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,937 INFO [RS:1;97f25515df69:43795 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:39:30,937 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
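Note: the MemStoreFlusher lines above report a global limit of 880 M with a low-water mark of 836 M, i.e. 95% of the limit. These numbers correspond to heap-fraction settings; a hedged sketch of the usual keys (the fractions shown are the stock defaults, not values read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static Configuration memstoreTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores (the global limit).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the global limit; 0.95 matches 836 M / 880 M above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }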
2024-11-28T00:39:30,941 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:39:30,941 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:39:30,941 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:39:30,949 INFO [RS:2;97f25515df69:34347 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:39:30,952 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,952 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,952 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,952 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,953 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,953 INFO [RS:0;97f25515df69:38585 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:39:30,953 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,953 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
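Note: the CompactionConfiguration lines printed for each store of region 1588230740 (min 3 / max 10 files, ratio 1.2, off-peak ratio 5.0, 7-day major period with 0.5 jitter) map onto per-store compaction settings. A sketch of the commonly used keys, with values taken from the log output above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static Configuration compactionTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact, as logged
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact, as logged
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // 7-day major period, as logged
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }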
2024-11-28T00:39:30,953 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,953 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,953 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/97f25515df69:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,954 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,955 INFO [RS:1;97f25515df69:43795 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:39:30,955 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,955 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,955 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
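Note: each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above describes a fixed-size event pool. In plain JDK terms the sizing is roughly equivalent to the following analogy; it is not HBase's own ExecutorService wrapper:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedEventPool {
      /** corePoolSize == maxPoolSize with an unbounded queue: the shape reported for RS_OPEN_REGION etc. */
      public static ThreadPoolExecutor fixedPool(int threads) {
        return new ThreadPoolExecutor(
            threads, threads,              // corePoolSize == maxPoolSize
            60L, TimeUnit.SECONDS,         // keep-alive for surplus threads (none in a fixed pool)
            new LinkedBlockingQueue<>());  // pending open/close events queue up here
      }
    }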
2024-11-28T00:39:30,955 DEBUG [RS:0;97f25515df69:38585 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,955 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,955 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,955 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,955 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/97f25515df69:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/97f25515df69:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,956 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,957 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,957 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,957 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,957 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,957 DEBUG [RS:1;97f25515df69:43795 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,957 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/97f25515df69:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:39:30,957 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,957 DEBUG [RS:2;97f25515df69:34347 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/97f25515df69:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:39:30,975 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,975 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43795,1732754369084-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:39:30,976 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,38585,1732754368938-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
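Note: most of the "Chore ScheduledChore name=... is enabled" lines follow one pattern: a ScheduledChore subclass registered with a ChoreService. A minimal sketch of that pattern; the chore name and body below are invented for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      /** A trivial chore that runs every periodMs, like CompactionChecker or MemstoreFlusherChore. */
      static final class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper, int periodMs) {
          super("HeartbeatChore", stopper, periodMs);
        }
        @Override
        protected void chore() {
          System.out.println("chore tick"); // periodic work goes here
        }
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(new HeartbeatChore(stopper, 1000)); // period=1000 ms, like CompactionChecker
      }
    }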
2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:30,977 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,34347,1732754369169-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:39:30,998 INFO [RS:1;97f25515df69:43795 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:39:31,000 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43795,1732754369084-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,001 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,001 INFO [RS:1;97f25515df69:43795 {}] regionserver.Replication(171): 97f25515df69,43795,1732754369084 started 2024-11-28T00:39:31,005 INFO [RS:0;97f25515df69:38585 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:39:31,005 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,38585,1732754368938-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,005 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,006 INFO [RS:0;97f25515df69:38585 {}] regionserver.Replication(171): 97f25515df69,38585,1732754368938 started 2024-11-28T00:39:31,021 INFO [RS:2;97f25515df69:34347 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:39:31,022 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,34347,1732754369169-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,023 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,023 INFO [RS:2;97f25515df69:34347 {}] regionserver.Replication(171): 97f25515df69,34347,1732754369169 started 2024-11-28T00:39:31,025 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-28T00:39:31,026 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1482): Serving as 97f25515df69,43795,1732754369084, RpcServer on 97f25515df69/172.17.0.3:43795, sessionid=0x1017ee15dea0002 2024-11-28T00:39:31,027 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:39:31,027 DEBUG [RS:1;97f25515df69:43795 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 97f25515df69,43795,1732754369084 2024-11-28T00:39:31,027 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,43795,1732754369084' 2024-11-28T00:39:31,027 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:39:31,028 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:39:31,029 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:39:31,029 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:39:31,029 DEBUG [RS:1;97f25515df69:43795 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 97f25515df69,43795,1732754369084 2024-11-28T00:39:31,030 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,43795,1732754369084' 2024-11-28T00:39:31,030 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:39:31,030 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,030 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1482): Serving as 97f25515df69,38585,1732754368938, RpcServer on 97f25515df69/172.17.0.3:38585, sessionid=0x1017ee15dea0001 2024-11-28T00:39:31,030 DEBUG [RS:1;97f25515df69:43795 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:39:31,031 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:39:31,031 DEBUG [RS:0;97f25515df69:38585 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 97f25515df69,38585,1732754368938 2024-11-28T00:39:31,031 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,38585,1732754368938' 2024-11-28T00:39:31,031 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:39:31,031 DEBUG [RS:1;97f25515df69:43795 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:39:31,031 INFO [RS:1;97f25515df69:43795 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:39:31,031 INFO [RS:1;97f25515df69:43795 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
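Note: the "Quota support disabled" lines above mean RPC and space quotas are off, which is the default. A sketch of how quota support is usually switched on via the standard configuration key (this test run leaves it disabled):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
      public static Configuration quotasEnabled() {
        Configuration conf = HBaseConfiguration.create();
        // Turns on RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager at startup.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }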
2024-11-28T00:39:31,031 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:39:31,032 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:39:31,032 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:39:31,032 DEBUG [RS:0;97f25515df69:38585 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 97f25515df69,38585,1732754368938 2024-11-28T00:39:31,032 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,38585,1732754368938' 2024-11-28T00:39:31,032 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:39:31,033 DEBUG [RS:0;97f25515df69:38585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:39:31,034 DEBUG [RS:0;97f25515df69:38585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:39:31,034 INFO [RS:0;97f25515df69:38585 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:39:31,034 INFO [RS:0;97f25515df69:38585 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-28T00:39:31,041 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:31,041 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1482): Serving as 97f25515df69,34347,1732754369169, RpcServer on 97f25515df69/172.17.0.3:34347, sessionid=0x1017ee15dea0003 2024-11-28T00:39:31,041 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:39:31,041 DEBUG [RS:2;97f25515df69:34347 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 97f25515df69,34347,1732754369169 2024-11-28T00:39:31,042 DEBUG [RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,34347,1732754369169' 2024-11-28T00:39:31,042 DEBUG [RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:39:31,043 DEBUG [RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:39:31,043 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:39:31,043 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:39:31,044 DEBUG [RS:2;97f25515df69:34347 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 97f25515df69,34347,1732754369169 2024-11-28T00:39:31,044 DEBUG [RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '97f25515df69,34347,1732754369169' 2024-11-28T00:39:31,044 DEBUG [RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:39:31,044 DEBUG 
[RS:2;97f25515df69:34347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:39:31,045 DEBUG [RS:2;97f25515df69:34347 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:39:31,045 INFO [RS:2;97f25515df69:34347 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:39:31,045 INFO [RS:2;97f25515df69:34347 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-28T00:39:31,059 WARN [97f25515df69:43227 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-28T00:39:31,138 INFO [RS:0;97f25515df69:38585 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:39:31,138 INFO [RS:1;97f25515df69:43795 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:39:31,141 INFO [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C43795%2C1732754369084, suffix=, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs, maxLogs=32 2024-11-28T00:39:31,145 INFO [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C38585%2C1732754368938, suffix=, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs, maxLogs=32 2024-11-28T00:39:31,146 INFO [RS:2;97f25515df69:34347 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:39:31,150 INFO [RS:2;97f25515df69:34347 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C34347%2C1732754369169, suffix=, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,34347,1732754369169, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs, maxLogs=32 2024-11-28T00:39:31,167 DEBUG [RS:0;97f25515df69:38585 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938/97f25515df69%2C38585%2C1732754368938.1732754371150, exclude list is [], retry=0 2024-11-28T00:39:31,171 DEBUG [RS:1;97f25515df69:43795 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084/97f25515df69%2C43795%2C1732754369084.1732754371144, exclude list is [], retry=0 2024-11-28T00:39:31,175 DEBUG [RS:2;97f25515df69:34347 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,34347,1732754369169/97f25515df69%2C34347%2C1732754369169.1732754371153, exclude list is [], retry=0 2024-11-28T00:39:31,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for 
addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:39:31,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:39:31,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 2024-11-28T00:39:31,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 2024-11-28T00:39:31,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:39:31,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:39:31,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:39:31,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 2024-11-28T00:39:31,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:39:31,222 INFO [RS:2;97f25515df69:34347 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,34347,1732754369169/97f25515df69%2C34347%2C1732754369169.1732754371153 2024-11-28T00:39:31,224 DEBUG [RS:2;97f25515df69:34347 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42575:42575),(127.0.0.1/127.0.0.1:40703:40703),(127.0.0.1/127.0.0.1:33685:33685)] 2024-11-28T00:39:31,232 INFO [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084/97f25515df69%2C43795%2C1732754369084.1732754371144 2024-11-28T00:39:31,233 INFO [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938/97f25515df69%2C38585%2C1732754368938.1732754371150 2024-11-28T00:39:31,233 DEBUG [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40703:40703),(127.0.0.1/127.0.0.1:33685:33685),(127.0.0.1/127.0.0.1:42575:42575)] 2024-11-28T00:39:31,234 DEBUG [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33685:33685),(127.0.0.1/127.0.0.1:42575:42575),(127.0.0.1/127.0.0.1:40703:40703)] 2024-11-28T00:39:31,312 DEBUG [97f25515df69:43227 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-28T00:39:31,323 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:39:31,329 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:39:31,329 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:39:31,329 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:39:31,329 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:39:31,329 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:39:31,330 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:39:31,330 INFO [97f25515df69:43227 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:39:31,330 INFO [97f25515df69:43227 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:39:31,330 INFO [97f25515df69:43227 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:39:31,330 DEBUG [97f25515df69:43227 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:39:31,337 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:39:31,343 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 97f25515df69,43795,1732754369084, state=OPENING 2024-11-28T00:39:31,355 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-28T00:39:31,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:31,366 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:31,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:31,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-28T00:39:31,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,369 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T00:39:31,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:39:31,585 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:31,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34099, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:31,620 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-28T00:39:31,621 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:39:31,622 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-28T00:39:31,633 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C43795%2C1732754369084.meta, suffix=.meta, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs, maxLogs=32 2024-11-28T00:39:31,659 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084/97f25515df69%2C43795%2C1732754369084.meta.1732754371635.meta, exclude list is [], retry=0 2024-11-28T00:39:31,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:39:31,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration 
for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 2024-11-28T00:39:31,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:39:31,692 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,43795,1732754369084/97f25515df69%2C43795%2C1732754369084.meta.1732754371635.meta 2024-11-28T00:39:31,693 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40703:40703),(127.0.0.1/127.0.0.1:42575:42575),(127.0.0.1/127.0.0.1:33685:33685)] 2024-11-28T00:39:31,693 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:39:31,694 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-28T00:39:31,696 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:31,697 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-28T00:39:31,700 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-28T00:39:31,702 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
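The WAL provider (wal.AsyncFSWALProvider) and the AccessController coprocessor loaded on hbase:meta above are both driven by configuration. A minimal sketch of the relevant settings, assuming the standard HBase property names; the values are illustrative and not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch: roughly how a harness could arrive at the state the log shows --
    // the async WAL provider plus the AccessController coprocessor on regions and master.
    public class WalAndCoprocessorConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs"); // matches wal.AsyncFSWALProvider in the log
        conf.set("hbase.security.authorization", "true");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }

The MultiRowMutationEndpoint, by contrast, is attached to hbase:meta through the table descriptor itself, which is why the log reports it as loaded "from HTD of hbase:meta".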
2024-11-28T00:39:31,716 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-28T00:39:31,718 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:31,718 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-28T00:39:31,718 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-28T00:39:31,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T00:39:31,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T00:39:31,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:31,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:31,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-28T00:39:31,733 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-28T00:39:31,733 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:31,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:31,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T00:39:31,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T00:39:31,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:31,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:39:31,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T00:39:31,754 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T00:39:31,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:31,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
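Each compactions.CompactionConfiguration line above is the store echoing its effective settings back for one column family. A minimal sketch of the configuration keys behind the numbers shown (minCompactSize, minFilesToCompact/maxFilesToCompact, ratio, off-peak ratio), assuming the standard property names; the values simply restate the logged defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: the knobs behind the CompactionConfiguration(183) lines.
    // Values mirror the defaults echoed in the log; change them to tune compactions.
    public class CompactionConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        return conf;
      }
    }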
2024-11-28T00:39:31,756 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-28T00:39:31,758 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:39:31,763 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:39:31,767 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-28T00:39:31,767 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-28T00:39:31,768 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-28T00:39:31,772 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-28T00:39:31,775 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66900333, jitterRate=-0.0031073540449142456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:39:31,775 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-28T00:39:31,780 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732754371719Writing region info on filesystem at 1732754371719Initializing all the Stores at 1732754371724 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754371724Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754371726 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754371726Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754371726Cleaning up temporary data from old regions at 1732754371767 (+41 ms)Running coprocessor post-open hooks at 1732754371775 (+8 ms)Region opened successfully at 1732754371779 (+4 ms) 2024-11-28T00:39:31,789 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732754371552 2024-11-28T00:39:31,808 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-28T00:39:31,809 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-28T00:39:31,811 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:39:31,816 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 97f25515df69,43795,1732754369084, state=OPEN 2024-11-28T00:39:31,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:39:31,865 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:39:31,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:39:31,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:39:31,867 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:39:31,867 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084 2024-11-28T00:39:31,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-28T00:39:31,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084 in 497 msec 2024-11-28T00:39:31,903 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:39:31,904 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-28T00:39:31,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-28T00:39:31,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 996 msec 2024-11-28T00:39:31,973 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:31,975 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:32,015 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:32,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56977, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:32,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5110 sec 2024-11-28T00:39:32,069 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732754372069, completionTime=-1 2024-11-28T00:39:32,073 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-28T00:39:32,074 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
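The "Start fetching meta region location from registry" step and the "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces" step of InitMetaProcedure have straightforward client-side counterparts. A hedged sketch using the public client API; it assumes a reachable cluster whose configuration is already on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hedged sketch: resolve hbase:meta the way the registry fetch above does, then
    // list the namespaces that InitMetaProcedure creates ('default' and 'hbase').
    public class MetaLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          HRegionLocation metaLoc = conn.getRegionLocator(TableName.META_TABLE_NAME)
              .getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + metaLoc.getServerName());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }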
2024-11-28T00:39:32,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-28T00:39:32,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732754432116 2024-11-28T00:39:32,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732754492116 2024-11-28T00:39:32,116 INFO [master/97f25515df69:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 42 msec 2024-11-28T00:39:32,118 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:39:32,128 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,128 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,129 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,132 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-97f25515df69:43227, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,144 DEBUG [master/97f25515df69:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-28T00:39:32,146 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,150 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:32,201 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.896sec 2024-11-28T00:39:32,214 INFO [master/97f25515df69:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-28T00:39:32,215 INFO [master/97f25515df69:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-28T00:39:32,217 INFO [master/97f25515df69:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-28T00:39:32,218 INFO [master/97f25515df69:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
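The ScheduledChore entries above (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, FlushedSequenceIdFlusher) all follow the same ChoreService pattern. A minimal sketch of that pattern with a hypothetical DemoChore; the three-argument ScheduledChore constructor is assumed to take its period in milliseconds:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Hedged sketch of the chore pattern the log reports: a periodic task on a ChoreService.
    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 60_000) {
          @Override protected void chore() {
            // periodic work, analogous to ClusterStatusChore's status refresh
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(chore);
        Thread.sleep(1_000);
        service.shutdown();
      }
    }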
2024-11-28T00:39:32,218 INFO [master/97f25515df69:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-28T00:39:32,219 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:39:32,220 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-28T00:39:32,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff18a86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:32,266 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-28T00:39:32,266 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-28T00:39:32,274 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:32,278 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:32,284 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:39:32,285 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 97f25515df69,43227,1732754367980 2024-11-28T00:39:32,288 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@27d72b9e 2024-11-28T00:39:32,293 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:39:32,297 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42115, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:39:32,306 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T00:39:32,319 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:32,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-28T00:39:32,324 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:32,325 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: 
cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:32,326 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:39:32,326 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce47fd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:32,326 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:32,326 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:32,327 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-28T00:39:32,381 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:39:32,386 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:32,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:39:32,391 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:32,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab0cf03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:32,406 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:32,425 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:32,426 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:32,439 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36336, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=97f25515df69,43227,1732754367980 2024-11-28T00:39:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
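The "Minicluster is up" and "Starting mini mapreduce cluster" lines correspond to the usual HBaseTestingUtil bootstrap in a test. A sketch under the assumption that the 3.x HBaseTestingUtil keeps the long-standing HBaseTestingUtility method names (startMiniCluster, startMiniMapReduceCluster and their shutdown counterparts); verify against the exact snapshot in use:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Hedged sketch of the test bootstrap these lines come from: a 3-regionserver
    // minicluster followed by a mini MapReduce cluster. Method names are assumptions.
    public class MiniClusterBootstrapSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(3);          // master + 3 regionservers, as in this log
        util.startMiniMapReduceCluster();  // "Starting mini mapreduce cluster..."
        try {
          // test body would go here
        } finally {
          util.shutdownMiniMapReduceCluster();
          util.shutdownMiniCluster();
        }
      }
    }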
2024-11-28T00:39:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/test.cache.data in system properties and HBase conf 2024-11-28T00:39:32,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T00:39:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir in system properties and HBase conf 2024-11-28T00:39:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T00:39:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T00:39:32,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T00:39:32,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/nfs.dump.dir in system properties and HBase conf 2024-11-28T00:39:32,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir in system properties and HBase conf 2024-11-28T00:39:32,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:39:32,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-28T00:39:32,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T00:39:32,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:39:32,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:39:32,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:39:32,481 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 47c99a40beabefa004915b0134843184, NAME => 'hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.', STARTKEY => '', 
ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:32,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:39:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:39:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:39:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:39:32,534 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:32,535 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 47c99a40beabefa004915b0134843184, disabling compactions & flushes 2024-11-28T00:39:32,535 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:32,535 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:32,535 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. after waiting 0 ms 2024-11-28T00:39:32,535 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:32,535 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 
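The CreateTableProcedure above echoes the full schema of hbase:acl: a single 'l' family with VERSIONS => '1', IN_MEMORY => 'true', an 8 KB block size and no bloom filter. That table is created internally by the master, but a client-side descriptor carrying the same attributes would look roughly like this (the table name here is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: a descriptor mirroring the logged hbase:acl attributes.
    public class AclLikeTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo_acl_like"))   // hypothetical name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
                  .setMaxVersions(1)
                  .setInMemory(true)
                  .setBlocksize(8192)
                  .setBloomFilterType(BloomType.NONE)
                  .build())
              .build();
          admin.createTable(td);
        }
      }
    }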
2024-11-28T00:39:32,535 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 47c99a40beabefa004915b0134843184: Waiting for close lock at 1732754372534Disabling compacts and flushes for region at 1732754372534Disabling writes for close at 1732754372535 (+1 ms)Writing region close event to WAL at 1732754372535Closed at 1732754372535 2024-11-28T00:39:32,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:39:32,570 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732754372542"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754372542"}]},"ts":"1732754372542"} 2024-11-28T00:39:32,576 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-28T00:39:32,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:39:32,591 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754372582"}]},"ts":"1732754372582"} 2024-11-28T00:39:32,598 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-28T00:39:32,599 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:39:32,600 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:39:32,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:39:32,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:39:32,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:39:32,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:39:32,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=47c99a40beabefa004915b0134843184, ASSIGN}] 2024-11-28T00:39:32,609 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=47c99a40beabefa004915b0134843184, ASSIGN 2024-11-28T00:39:32,613 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=47c99a40beabefa004915b0134843184, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:39:32,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:39:32,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:39:32,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:39:32,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:39:32,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:39:32,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:39:32,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:39:32,766 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:39:32,768 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=47c99a40beabefa004915b0134843184, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:39:32,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=47c99a40beabefa004915b0134843184, ASSIGN because future has completed 2024-11-28T00:39:32,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 47c99a40beabefa004915b0134843184, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:39:33,004 INFO [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:33,005 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 47c99a40beabefa004915b0134843184, NAME => 'hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:39:33,006 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 
service=AccessControlService 2024-11-28T00:39:33,006 INFO [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:33,006 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,006 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:33,007 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,007 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,010 INFO [StoreOpener-47c99a40beabefa004915b0134843184-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,014 INFO [StoreOpener-47c99a40beabefa004915b0134843184-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47c99a40beabefa004915b0134843184 columnFamilyName l 2024-11-28T00:39:33,014 DEBUG [StoreOpener-47c99a40beabefa004915b0134843184-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:33,016 INFO [StoreOpener-47c99a40beabefa004915b0134843184-1 {}] regionserver.HStore(327): Store=47c99a40beabefa004915b0134843184/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:33,016 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,018 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,019 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,021 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,021 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,025 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:39:33,044 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:33,045 INFO [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 47c99a40beabefa004915b0134843184; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61264282, jitterRate=-0.08709105849266052}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:33,046 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 47c99a40beabefa004915b0134843184 2024-11-28T00:39:33,049 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 47c99a40beabefa004915b0134843184: Running coprocessor pre-open hook at 1732754373007Writing region info on filesystem at 1732754373007Initializing all the Stores at 1732754373010 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754373010Cleaning up temporary data from old regions at 1732754373021 (+11 ms)Running coprocessor post-open hooks at 1732754373046 (+25 ms)Region opened successfully at 1732754373049 (+3 ms) 2024-11-28T00:39:33,051 INFO [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., pid=6, masterSystemTime=1732754372971 2024-11-28T00:39:33,060 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:33,060 INFO [RS_OPEN_PRIORITY_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:39:33,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=47c99a40beabefa004915b0134843184, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:39:33,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 47c99a40beabefa004915b0134843184, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:39:33,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-28T00:39:33,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 47c99a40beabefa004915b0134843184, server=97f25515df69,43795,1732754369084 in 295 msec 2024-11-28T00:39:33,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-28T00:39:33,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=47c99a40beabefa004915b0134843184, ASSIGN in 475 msec 2024-11-28T00:39:33,102 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:39:33,102 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754373102"}]},"ts":"1732754373102"} 2024-11-28T00:39:33,108 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-28T00:39:33,115 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:39:33,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 801 msec 2024-11-28T00:39:33,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:39:33,540 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-28T00:39:33,547 DEBUG [master/97f25515df69:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-28T00:39:33,549 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
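Once RawAsyncHBaseAdmin reports "Operation: CREATE, Table Name: hbase:acl completed", a client can confirm the table is online through the async admin API. A hedged sketch; it assumes the cluster connection settings are already on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hedged sketch of the async admin path behind the RawAsyncHBaseAdmin line above:
    // verify from a client that hbase:acl exists and is enabled.
    public class AsyncAdminCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = conn.getAdmin();
          TableName acl = TableName.valueOf("hbase:acl");
          boolean exists = admin.tableExists(acl).get();
          boolean enabled = exists && admin.isTableEnabled(acl).get();
          System.out.println("hbase:acl exists=" + exists + " enabled=" + enabled);
        }
      }
    }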
2024-11-28T00:39:33,550 INFO [master/97f25515df69:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=97f25515df69,43227,1732754367980-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:39:34,416 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:34,448 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:34,803 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:34,804 WARN [Thread-383 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:39:34,805 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:34,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:34,810 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:34,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:34,810 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:34,810 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:34,810 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:39:34,811 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a682620{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:34,811 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3748e205{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:34,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:34,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8c8026f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:34,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32f7927f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:34,963 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-11-28T00:39:34,963 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-11-28T00:39:34,963 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:39:34,965 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:39:35,024 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:35,319 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:35,660 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-11-28T00:39:35,686 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c81a02c{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-43225-hadoop-yarn-common-3_4_1_jar-_-any-12465175364253595683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-28T00:39:35,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26bd67ae{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-45881-hadoop-yarn-common-3_4_1_jar-_-any-13463293979406274752/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-28T00:39:35,687 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@703bd321{HTTP/1.1, (http/1.1)}{localhost:43225} 2024-11-28T00:39:35,687 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f470f8a{HTTP/1.1, (http/1.1)}{localhost:45881} 2024-11-28T00:39:35,687 INFO [Time-limited 
test {}] server.Server(415): Started @17499ms 2024-11-28T00:39:35,687 INFO [Thread-383 {}] server.Server(415): Started @17499ms 2024-11-28T00:39:35,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:39:35,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:39:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:39:37,149 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-28T00:39:37,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:37,199 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:39:37,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:37,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:37,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:37,225 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T00:39:37,227 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:37,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15a2daaa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:37,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44dd1d2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:37,241 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:39:37,296 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-28T00:39:37,296 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:39:37,296 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-28T00:39:37,296 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:39:37,312 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:37,348 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:37,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-28T00:39:37,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-28T00:39:37,590 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:37,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b49ff65{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-38831-hadoop-yarn-common-3_4_1_jar-_-any-2944467393201274585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:39:37,605 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24a77e77{HTTP/1.1, (http/1.1)}{localhost:38831} 2024-11-28T00:39:37,605 INFO [Time-limited test {}] server.Server(415): Started @19417ms 2024-11-28T00:39:37,853 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log 
Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-28T00:39:37,856 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:37,879 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:39:37,880 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:39:37,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:39:37,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:39:37,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T00:39:37,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:39:37,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e5c03a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:39:37,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78cbf550{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:39:37,986 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-28T00:39:37,986 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:39:37,986 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-28T00:39:37,986 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:39:38,005 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:38,013 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:39:38,175 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the 
scope "Singleton" 2024-11-28T00:39:38,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@485fbdad{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/java.io.tmpdir/jetty-localhost-45987-hadoop-yarn-common-3_4_1_jar-_-any-13996762198783843615/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:39:38,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6dc79a2c{HTTP/1.1, (http/1.1)}{localhost:45987} 2024-11-28T00:39:38,212 INFO [Time-limited test {}] server.Server(415): Started @20024ms 2024-11-28T00:39:38,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-28T00:39:38,276 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:39:38,331 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=717, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=416, ProcessCount=11, AvailableMemoryMB=6765 2024-11-28T00:39:38,334 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=717 is superior to 500 2024-11-28T00:39:38,343 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:39:38,349 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 97f25515df69,43227,1732754367980 2024-11-28T00:39:38,349 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3e6c7409 2024-11-28T00:39:38,350 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:39:38,364 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:39:38,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:39:38,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:38,373 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:39:38,376 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-11-28T00:39:38,379 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:39:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:39:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:39:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:39:38,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:39:38,425 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5a5b4ebd73e242692e8c5b1b9f96994f, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:38,425 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5081119658eb6f560e50275c3a19a0ce, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:38,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:39:38,443 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-28T00:39:38,444 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T00:39:38,444 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-28T00:39:38,446 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-28T00:39:38,446 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:39:38,447 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:39:38,447 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-28T00:39:38,448 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:39:38,448 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-28T00:39:38,448 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:39:38,448 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-28T00:39:38,449 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:39:38,449 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-28T00:39:38,449 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-28T00:39:38,449 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-28T00:39:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:39:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45777 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:39:38,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:39:38,484 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:38,484 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 5081119658eb6f560e50275c3a19a0ce, disabling compactions & flushes 2024-11-28T00:39:38,484 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:38,484 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:38,485 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. after waiting 0 ms 2024-11-28T00:39:38,485 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:38,485 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 
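The repeated WARN entries earlier in this run, "Unable to initialize FileSignerSecretProvider, falling back to use random secrets", come from the Hadoop HTTP AuthenticationFilter on each Jetty endpoint: /home/jenkins/hadoop-http-auth-signature-secret does not exist, so every web server falls back to a random signing secret, which is harmless in a test. If a fixed secret were wanted, the usual knob is hadoop.http.authentication.signature.secret.file; the sketch below follows the standard Hadoop setup, and the /tmp path in it is hypothetical rather than anything this job uses.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.hadoop.conf.Configuration;

    // Hedged sketch: point the HTTP AuthenticationFilter at an existing secret
    // file so it does not fall back to random per-server secrets.
    public class HttpAuthSecret {
      public static void configure(Configuration conf) throws Exception {
        Path secret = Path.of("/tmp/hadoop-http-auth-signature-secret"); // hypothetical path
        if (!Files.exists(secret)) {
          Files.writeString(secret, "some-shared-secret\n");
        }
        conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
      }
    }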
2024-11-28T00:39:38,485 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5081119658eb6f560e50275c3a19a0ce: Waiting for close lock at 1732754378484Disabling compacts and flushes for region at 1732754378484Disabling writes for close at 1732754378485 (+1 ms)Writing region close event to WAL at 1732754378485Closed at 1732754378485 2024-11-28T00:39:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:39:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741843_1019 (size=83) 2024-11-28T00:39:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741843_1019 (size=83) 2024-11-28T00:39:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741843_1019 (size=83) 2024-11-28T00:39:38,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:38,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 5a5b4ebd73e242692e8c5b1b9f96994f, disabling compactions & flushes 2024-11-28T00:39:38,493 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:38,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:38,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. after waiting 0 ms 2024-11-28T00:39:38,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:38,493 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
2024-11-28T00:39:38,494 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5a5b4ebd73e242692e8c5b1b9f96994f: Waiting for close lock at 1732754378493Disabling compacts and flushes for region at 1732754378493Disabling writes for close at 1732754378493Writing region close event to WAL at 1732754378493Closed at 1732754378493 2024-11-28T00:39:38,496 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:39:38,496 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732754378496"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754378496"}]},"ts":"1732754378496"} 2024-11-28T00:39:38,496 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732754378496"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754378496"}]},"ts":"1732754378496"} 2024-11-28T00:39:38,528 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:39:38,530 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:39:38,530 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754378530"}]},"ts":"1732754378530"} 2024-11-28T00:39:38,534 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-28T00:39:38,535 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:39:38,537 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:39:38,537 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:39:38,537 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:39:38,537 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:39:38,538 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:39:38,538 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:39:38,538 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:39:38,538 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:39:38,538 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:39:38,538 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:39:38,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, ASSIGN}] 2024-11-28T00:39:38,541 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, ASSIGN 2024-11-28T00:39:38,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, ASSIGN 2024-11-28T00:39:38,544 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:39:38,544 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:39:38,694 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
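For orientation, the create request logged at 00:39:38,366 (a MOB-enabled family 'cf' with MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW', and two regions split at row key '1') corresponds roughly to the client call sketched below. It uses the current HBase Java Admin API; the connection plumbing is assumed rather than taken from the test source.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: issue a create-table call equivalent to the one recorded in
    // the log (MOB family "cf", one split point at "1", so two regions).
    public class CreateSplitMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)               // IS_MOB => 'true'
              .setMobThreshold(0L)               // MOB_THRESHOLD => '0'
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build());
          byte[][] splitKeys = { Bytes.toBytes("1") }; // matches the ''/'1' region boundaries above
          admin.createTable(table.build(), splitKeys); // blocks until the CreateTableProcedure finishes
        }
      }
    }

The "Checking to see if procedure is done pid=7" lines interleaved above appear to be the server-side view of that blocking wait: the client polls the master until the create procedure reports success.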
2024-11-28T00:39:38,694 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=5a5b4ebd73e242692e8c5b1b9f96994f, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:38,694 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5081119658eb6f560e50275c3a19a0ce, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:39:38,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, ASSIGN because future has completed 2024-11-28T00:39:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:39:38,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:39:38,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, ASSIGN because future has completed 2024-11-28T00:39:38,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:39:38,853 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:38,855 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:38,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49285, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:38,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37377, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:38,877 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:38,877 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 5081119658eb6f560e50275c3a19a0ce, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:39:38,878 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 
service=AccessControlService 2024-11-28T00:39:38,878 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:39:38,879 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,879 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:38,879 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,879 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,879 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:38,879 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a5b4ebd73e242692e8c5b1b9f96994f, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:39:38,880 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. service=AccessControlService 2024-11-28T00:39:38,880 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:39:38,880 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,881 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:38,881 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,881 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,890 INFO [StoreOpener-5a5b4ebd73e242692e8c5b1b9f96994f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,896 INFO [StoreOpener-5081119658eb6f560e50275c3a19a0ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,898 INFO [StoreOpener-5081119658eb6f560e50275c3a19a0ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5081119658eb6f560e50275c3a19a0ce columnFamilyName cf 2024-11-28T00:39:38,898 INFO [StoreOpener-5a5b4ebd73e242692e8c5b1b9f96994f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a5b4ebd73e242692e8c5b1b9f96994f columnFamilyName cf 2024-11-28T00:39:38,904 DEBUG [StoreOpener-5081119658eb6f560e50275c3a19a0ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:38,904 DEBUG [StoreOpener-5a5b4ebd73e242692e8c5b1b9f96994f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:38,905 INFO [StoreOpener-5a5b4ebd73e242692e8c5b1b9f96994f-1 {}] regionserver.HStore(327): Store=5a5b4ebd73e242692e8c5b1b9f96994f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:38,906 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,906 INFO [StoreOpener-5081119658eb6f560e50275c3a19a0ce-1 {}] regionserver.HStore(327): Store=5081119658eb6f560e50275c3a19a0ce/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:38,907 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,913 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,914 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,915 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,916 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,924 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,927 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,933 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,933 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:38,934 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,934 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,934 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 5081119658eb6f560e50275c3a19a0ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63622078, jitterRate=-0.051957160234451294}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:38,935 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:38,936 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 5081119658eb6f560e50275c3a19a0ce: Running coprocessor pre-open hook at 1732754378879Writing region info on filesystem at 1732754378879Initializing all the Stores at 1732754378885 (+6 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754378885Cleaning up temporary data from old regions at 1732754378916 (+31 ms)Running coprocessor post-open hooks at 1732754378935 (+19 ms)Region opened successfully at 1732754378936 (+1 ms) 2024-11-28T00:39:38,939 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce., pid=11, masterSystemTime=1732754378855 2024-11-28T00:39:38,942 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,946 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5081119658eb6f560e50275c3a19a0ce, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:39:38,948 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:38,949 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 
2024-11-28T00:39:38,950 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:38,951 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 5a5b4ebd73e242692e8c5b1b9f96994f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72805654, jitterRate=0.08488878607749939}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:38,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:38,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 5a5b4ebd73e242692e8c5b1b9f96994f: Running coprocessor pre-open hook at 1732754378881Writing region info on filesystem at 1732754378881Initializing all the Stores at 1732754378885 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754378885Cleaning up temporary data from old regions at 1732754378934 (+49 ms)Running coprocessor post-open hooks at 1732754378952 (+18 ms)Region opened successfully at 1732754378952 2024-11-28T00:39:38,954 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f., pid=10, masterSystemTime=1732754378853 2024-11-28T00:39:38,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:39:38,961 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:38,961 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
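A small aside on the "desiredMaxFileSize=..., jitterRate=..." figures in the region-open entries above: 61264282, 63622078 and 72805654 are each within a byte of 67108864 × (1 + jitterRate), so the jittered split thresholds look like they were derived from a 64 MB region max file size in this test configuration. That 64 MB base is an inference from the arithmetic, not a value the log states directly.

    // Hedged check: back out the un-jittered max file size from the logged values.
    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long base = 64L * 1024 * 1024; // 67108864 bytes, inferred rather than logged
        double[] jitterRates = { -0.08709105849266052, -0.051957160234451294, 0.08488878607749939 };
        for (double j : jitterRates) {
          // Prints values within a byte of 61264282, 63622078 and 72805654.
          System.out.println((long) (base * (1 + j)));
        }
      }
    }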
2024-11-28T00:39:38,966 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=5a5b4ebd73e242692e8c5b1b9f96994f, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:38,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:39:38,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-28T00:39:38,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169 in 259 msec 2024-11-28T00:39:38,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, ASSIGN in 433 msec 2024-11-28T00:39:38,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-28T00:39:38,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938 in 291 msec 2024-11-28T00:39:39,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-28T00:39:39,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, ASSIGN in 459 msec 2024-11-28T00:39:39,006 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:39:39,007 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754379006"}]},"ts":"1732754379006"} 2024-11-28T00:39:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:39:39,017 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-28T00:39:39,020 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:39:39,025 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-28T00:39:39,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is 
[region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:39:39,045 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:39,045 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:39,046 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:39,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40567, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-28T00:39:39,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:39:39,061 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:39,096 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:39:39,096 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-28T00:39:39,117 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,117 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:39,117 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:39,117 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:39,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:39,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 752 msec 2024-11-28T00:39:39,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:39:39,518 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:39,521 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:39,527 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
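The CreateTableProcedure post-operation above writes the owner's permissions (jenkins: RWXCA) into hbase:acl, and each region server's ZKPermissionWatcher refreshes its cache when /hbase/acl/testtb-testExportFileSystemStateWithSplitRegion changes. The master records the owner's grant automatically; a minimal sketch of issuing the equivalent grant explicitly from a client, assuming the stock AccessControlClient helper, is shown below (user, table, and the RWXCA action set come from the log; everything else is illustrative).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA as written to hbase:acl above: READ, WRITE, EXEC, CREATE, ADMIN for user "jenkins".
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
          "jenkins",
          null,  // all column families
          null,  // all qualifiers
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
      // The AccessController coprocessor persists the entry in hbase:acl and publishes it
      // to /hbase/acl, which is the ZooKeeper NodeDataChanged traffic visible in the log.
    }
  }
}
```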
2024-11-28T00:39:39,529 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:39:39,532 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:39,547 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:39,557 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f., hostname=97f25515df69,38585,1732754368938, seqNum=2] 2024-11-28T00:39:39,557 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:39,560 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:39,564 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:39,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:39,567 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:39,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:39,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754379578 (current time:1732754379578). 
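The last records above show the master receiving the snapshot request { ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }. On the client side this is a single Admin call; a minimal sketch, assuming the synchronous flush-type snapshot API of the HBase Java client (not the test's literal code), follows.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush-type snapshot of the still-empty table; the master turns this
      // request into the SnapshotProcedure (pid=12) whose state transitions appear further down.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
    }
  }
}
```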
2024-11-28T00:39:39,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:39:39,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:39:39,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:39:39,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1380a02f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:39,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:39,582 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:39,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:39,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:39,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@291a0afd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:39,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:39,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,585 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49274, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:39,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6efaa06d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:39,589 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:39,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:39,591 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:39,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227. 2024-11-28T00:39:39,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:39,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,603 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:39:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@613038fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:39,606 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:39,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:39,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:39,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2651339a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:39,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:39,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,609 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:39,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@389d6b5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:39,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:39,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:39,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:39,614 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:39:39,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:39:39,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227. 2024-11-28T00:39:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:39,621 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:39:39,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:39:39,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:39:39,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:39,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-28T00:39:39,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:39:39,640 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:39:39,647 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:39:39,662 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:39:39,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:39:39,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:39:39,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:39:39,681 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:39:39,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce}] 2024-11-28T00:39:39,690 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:39,690 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:39,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:39:39,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-28T00:39:39,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-28T00:39:39,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:39,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:39,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5081119658eb6f560e50275c3a19a0ce: 2024-11-28T00:39:39,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 5a5b4ebd73e242692e8c5b1b9f96994f: 2024-11-28T00:39:39,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:39,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:39,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:39,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:39,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:39:39,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:39:39,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:39:39,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:39:39,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:39:39,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:39:39,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:39:39,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:39:39,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
2024-11-28T00:39:39,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-28T00:39:39,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-28T00:39:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-28T00:39:39,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:39,891 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:39,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-28T00:39:39,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:39,891 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:39,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f in 209 msec 2024-11-28T00:39:39,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-28T00:39:39,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce in 209 msec 2024-11-28T00:39:39,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:39:39,904 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:39:39,907 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:39:39,907 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:39:39,907 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:39,909 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:39:39,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:39:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:39:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:39:39,935 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:39:39,935 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,938 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:39:40,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:39:40,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:39:40,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:39:40,052 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:39:40,092 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:39:40,097 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:40,105 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:39:40,106 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-28T00:39:40,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 475 msec 2024-11-28T00:39:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:39:40,269 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:40,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:39:40,301 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:39:40,317 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:40,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:40,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
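The two HRegion(8528) warnings above come from the test loading rows into both regions with the WAL turned off, which is faster but not crash-safe. A minimal sketch of a client write that produces exactly this warning, assuming the SKIP_WAL durability setting, is below; the row key and value are invented, while the cf:q column matches the cell keys printed in the flush records further down.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
      // Hypothetical row key and value standing in for the test's generated data.
      Put put = new Put(Bytes.toBytes("row-0001"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0001"));
      // SKIP_WAL is what triggers the "with WAL disabled. Data may be lost" warning:
      // the edit only reaches the MemStore and cannot be replayed after a crash.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```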
2024-11-28T00:39:40,328 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:39:40,332 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:40,343 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:40,356 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:40,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754380363 (current time:1732754380363). 2024-11-28T00:39:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:39:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:39:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:39:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c5a2dc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:40,366 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:40,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:40,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:40,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772da37d, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:40,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:40,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,370 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:40,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76694cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:40,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:40,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:40,378 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36366, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:40,381 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:39:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,381 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:39:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ad1bcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:40,386 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:40,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:40,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:40,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25d568ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:40,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:40,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,389 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:40,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f2da210, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:40,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:40,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:40,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:40,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:39:40,401 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:39:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:40,402 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:39:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:39:40,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
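At this point the master registers the second snapshot (pid=15) over the now-populated table, and the RPC handler keeps logging "Checking to see if procedure is done pid=15" while the caller waits. A sketch of the equivalent non-blocking client pattern, submit then poll, is below; the class name and poll interval are invented, the snapshot and table names come from the log, and the calls assume the standard Admin interface rather than the test's own helpers.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      SnapshotDescription desc = new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH);
      // Submit without blocking, then poll the master until the SnapshotProcedure completes,
      // mirroring the repeated "Checking to see if procedure is done pid=15" calls above.
      admin.snapshotAsync(desc);
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200); // arbitrary poll interval for this sketch
      }
    }
  }
}
```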
2024-11-28T00:39:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-28T00:39:40,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:39:40,410 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:39:40,413 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:39:40,424 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:39:40,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:39:40,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:39:40,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:39:40,469 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:39:40,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce}] 2024-11-28T00:39:40,472 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:40,472 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:39:40,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-28T00:39:40,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-28T00:39:40,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:39:40,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:39:40,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 5081119658eb6f560e50275c3a19a0ce 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:39:40,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 5a5b4ebd73e242692e8c5b1b9f96994f 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:39:40,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce is 71, key is 11f5cfed0f9723c7644b070870fe3626/cf:q/1732754380301/Put/seqid=0 2024-11-28T00:39:40,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f is 71, key is 03e0d098aaa6c638e2311878038449c9/cf:q/1732754380296/Put/seqid=0 2024-11-28T00:39:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741852_1028 (size=5102) 2024-11-28T00:39:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741852_1028 (size=5102) 2024-11-28T00:39:40,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741852_1028 (size=5102) 2024-11-28T00:39:40,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741851_1027 (size=8171) 2024-11-28T00:39:40,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45777 is added to blk_1073741851_1027 (size=8171) 2024-11-28T00:39:40,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741851_1027 (size=8171) 2024-11-28T00:39:40,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:40,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:39:40,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:40,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/.tmp/cf/8ede43c4adfc416986840db283588899, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=5081119658eb6f560e50275c3a19a0ce] 2024-11-28T00:39:40,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/.tmp/cf/8ede43c4adfc416986840db283588899 is 224, key is 1ff2b1a1105ac32f856eca9f692ae0228/cf:q/1732754380301/Put/seqid=0 2024-11-28T00:39:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741853_1029 (size=15717) 2024-11-28T00:39:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741853_1029 (size=15717) 2024-11-28T00:39:40,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741853_1029 (size=15717) 2024-11-28T00:39:40,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/.tmp/cf/8ede43c4adfc416986840db283588899 2024-11-28T00:39:40,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/.tmp/cf/8ede43c4adfc416986840db283588899 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899 2024-11-28T00:39:40,931 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899, entries=47, sequenceid=6, filesize=15.3 K 2024-11-28T00:39:40,941 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5081119658eb6f560e50275c3a19a0ce in 309ms, sequenceid=6, compaction requested=false 2024-11-28T00:39:40,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-28T00:39:40,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 5081119658eb6f560e50275c3a19a0ce: 2024-11-28T00:39:40,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:40,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:40,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:40,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899] hfiles 2024-11-28T00:39:40,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741854_1030 (size=125) 2024-11-28T00:39:41,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741854_1030 (size=125) 2024-11-28T00:39:41,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741854_1030 (size=125) 2024-11-28T00:39:41,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 
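
The SnapshotProcedure trace above (pid=15 with SnapshotRegionProcedure subprocedures 16 and 17) is the master/regionserver side of a client-requested FLUSH snapshot. As a rough illustration only, not the actual test code, a client-side call that would drive this kind of procedure could look like the following Java sketch; the connection and configuration setup is assumed, and only the snapshot and table names are taken from the log.

    // Hypothetical client-side sketch: these calls are not part of the log, but a
    // FLUSH snapshot like the one traced above is normally requested through the
    // Admin API. Names come from the log; the connection setup is assumed.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot flushes each region's memstore first (the
          // HRegion "Flushing ..." lines above), then records references to the
          // resulting hfiles in the snapshot manifest.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }
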
2024-11-28T00:39:41,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-28T00:39:41,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-28T00:39:41,018 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:41,019 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:41,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5081119658eb6f560e50275c3a19a0ce in 551 msec 2024-11-28T00:39:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:39:41,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:41,148 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:41,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/.tmp/cf/c743017cf6d048c7a911df243d06539f, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=5a5b4ebd73e242692e8c5b1b9f96994f] 2024-11-28T00:39:41,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/.tmp/cf/c743017cf6d048c7a911df243d06539f is 224, key is 031a9c32f2913d757a9d13d0da3143c31/cf:q/1732754380296/Put/seqid=0 2024-11-28T00:39:41,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741855_1031 (size=5978) 2024-11-28T00:39:41,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741855_1031 (size=5978) 2024-11-28T00:39:41,166 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741855_1031 (size=5978) 2024-11-28T00:39:41,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/.tmp/cf/c743017cf6d048c7a911df243d06539f 2024-11-28T00:39:41,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/.tmp/cf/c743017cf6d048c7a911df243d06539f as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f 2024-11-28T00:39:41,198 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:39:41,199 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 5a5b4ebd73e242692e8c5b1b9f96994f in 572ms, sequenceid=6, compaction requested=false 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 5a5b4ebd73e242692e8c5b1b9f96994f: 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f] hfiles 2024-11-28T00:39:41,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741856_1032 (size=125) 2024-11-28T00:39:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741856_1032 (size=125) 2024-11-28T00:39:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741856_1032 (size=125) 2024-11-28T00:39:41,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
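
The HMobStore and DefaultMobStoreFlusher entries above indicate that the 'cf' family of the snapshotted table is MOB-enabled: flushed cells above the MOB threshold are written to separate hfiles under /mobdir and only referenced from the regular store files, which is why the snapshot manifest later adds those mob hfiles as well. Purely as an illustrative sketch (the builder chain is standard API, but the threshold value is an assumption and is not visible in this log), such a family could be declared like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      // Hypothetical descriptor; the tiny threshold is only meant to force MOB
      // behaviour in a test-like setup and is not read from the log.
      static TableDescriptor mobEnabledTable() {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // store large cells as separate mob hfiles
            .setMobThreshold(3L)   // cells above this size (bytes) go to /mobdir
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(cf)
            .build();
      }
    }
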
2024-11-28T00:39:41,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-28T00:39:41,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-28T00:39:41,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:41,224 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:41,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-28T00:39:41,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f in 756 msec 2024-11-28T00:39:41,231 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:39:41,234 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:39:41,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:39:41,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:39:41,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:41,244 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f] hfiles 2024-11-28T00:39:41,244 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:39:41,244 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:39:41,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:39:41,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:39:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:39:41,267 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:39:41,267 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,268 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:39:41,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46245 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:39:41,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:39:41,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:39:41,311 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:39:41,312 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,315 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:39:41,315 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-28T00:39:41,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 911 msec 2024-11-28T00:39:41,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:39:41,551 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:41,582 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:41,585 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:41,590 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37266, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:41,593 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:39:41,595 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:39:41,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51178, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:41,607 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:39:41,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:39:41,617 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:39:41,621 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:39:41,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:41,626 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:39:41,627 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:41,627 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-11-28T00:39:41,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:39:41,630 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:39:41,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:39:41,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:39:41,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:39:41,713 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6acff8215866b10794f82068c000d15a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:39:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:39:41,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:39:41,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:39:41,812 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:41,813 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 6acff8215866b10794f82068c000d15a, disabling compactions & flushes 2024-11-28T00:39:41,813 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:41,813 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:41,814 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. after waiting 0 ms 2024-11-28T00:39:41,814 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:41,814 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 
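
The CompactSplit "user switched off compactions" lines and the HMaster create request for 'testExportFileSystemStateWithSplitRegion' above correspond to what an Admin compaction switch and createTable call produce on the server. A hedged sketch of such a client follows; it is not the actual test code, the connection setup is assumed, and only the table name and the single 'cf' family come from the log, with everything else left at defaults.

    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Switch compactions off (an empty server list targets every
          // regionserver), matching the CompactSplit log lines above.
          admin.compactionSwitch(false, Collections.emptyList());
          // Create the table with a single default-configured 'cf' family; the
          // master then runs a CreateTableProcedure like pid=18 logged above.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }
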
2024-11-28T00:39:41,816 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6acff8215866b10794f82068c000d15a: Waiting for close lock at 1732754381812Disabling compacts and flushes for region at 1732754381812Disabling writes for close at 1732754381814 (+2 ms)Writing region close event to WAL at 1732754381814Closed at 1732754381814 2024-11-28T00:39:41,821 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:39:41,822 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732754381821"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754381821"}]},"ts":"1732754381821"} 2024-11-28T00:39:41,831 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-28T00:39:41,833 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:39:41,834 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754381834"}]},"ts":"1732754381834"} 2024-11-28T00:39:41,839 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-28T00:39:41,840 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:39:41,842 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:39:41,842 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:39:41,842 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:39:41,842 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:39:41,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, ASSIGN}] 2024-11-28T00:39:41,845 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, ASSIGN 2024-11-28T00:39:41,847 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:39:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:39:41,998 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:39:41,999 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=6acff8215866b10794f82068c000d15a, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:42,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, ASSIGN because future has completed 2024-11-28T00:39:42,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:39:42,174 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:42,174 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 6acff8215866b10794f82068c000d15a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:39:42,175 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. service=AccessControlService 2024-11-28T00:39:42,175 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:39:42,175 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,175 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:42,176 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,176 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,180 INFO [StoreOpener-6acff8215866b10794f82068c000d15a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,185 INFO [StoreOpener-6acff8215866b10794f82068c000d15a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6acff8215866b10794f82068c000d15a columnFamilyName cf 2024-11-28T00:39:42,185 DEBUG [StoreOpener-6acff8215866b10794f82068c000d15a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:42,190 INFO [StoreOpener-6acff8215866b10794f82068c000d15a-1 {}] regionserver.HStore(327): Store=6acff8215866b10794f82068c000d15a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:42,191 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,198 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,200 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,201 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,201 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,207 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,227 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:39:42,228 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 6acff8215866b10794f82068c000d15a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69199750, jitterRate=0.03115662932395935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:42,229 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:42,240 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 6acff8215866b10794f82068c000d15a: Running coprocessor pre-open hook at 1732754382176Writing region info on filesystem at 1732754382176Initializing all the Stores at 1732754382179 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754382179Cleaning up temporary data from old regions at 1732754382201 (+22 ms)Running coprocessor post-open hooks at 1732754382229 (+28 ms)Region opened successfully at 1732754382236 (+7 ms) 2024-11-28T00:39:42,242 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., pid=20, masterSystemTime=1732754382168 2024-11-28T00:39:42,248 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:42,248 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 
2024-11-28T00:39:42,250 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=6acff8215866b10794f82068c000d15a, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:42,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:39:42,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:39:42,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-28T00:39:42,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938 in 250 msec 2024-11-28T00:39:42,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-28T00:39:42,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, ASSIGN in 428 msec 2024-11-28T00:39:42,279 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:39:42,279 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754382279"}]},"ts":"1732754382279"} 2024-11-28T00:39:42,284 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-28T00:39:42,286 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:39:42,287 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-28T00:39:42,296 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:39:42,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:42,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:42,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:42,349 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:39:42,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 739 msec 2024-11-28T00:39:42,365 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,365 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:39:42,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:39:42,770 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:42,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can 
do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:42,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:39:43,367 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-11-28T00:39:44,390 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:39:45,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:39:45,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:39:45,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:39:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:48,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:39:48,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:48,443 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:39:49,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:39:49,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:39:49,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:39:49,920 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1732754382784/Put/seqid=0 2024-11-28T00:39:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741863_1039 (size=51979256) 2024-11-28T00:39:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741863_1039 (size=51979256) 2024-11-28T00:39:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38327 is added to blk_1073741863_1039 (size=51979256) 2024-11-28T00:39:49,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70dcaaf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:49,935 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:49,935 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:49,942 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:49,942 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:49,943 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:49,943 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b9202a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:49,943 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:49,944 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:49,945 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:49,948 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35110, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:49,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@270943e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:49,950 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:49,952 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:49,953 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:49,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:49,971 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile 
hdfs://localhost:44423/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-28T00:39:49,972 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:39:49,974 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 97f25515df69,43227,1732754367980 2024-11-28T00:39:49,974 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2e5671 2024-11-28T00:39:49,974 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:39:49,978 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35116, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:39:49,985 WARN [IPC Server handler 0 on default port 44423 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-28T00:39:49,993 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2] 2024-11-28T00:39:49,996 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:49,999 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:50,004 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:50,021 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:44423/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-11-28T00:39:50,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:50,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:50,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:50,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54855, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-28T00:39:50,057 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-28T00:39:50,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.3:54855 deadline: 1732754450056, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-11-28T00:39:50,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:39:50,090 WARN [IPC Server handler 0 on default port 44423 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-28T00:39:50,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:44423/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file for inclusion in 6acff8215866b10794f82068c000d15a/cf 2024-11-28T00:39:50,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-11-28T00:39:50,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-28T00:39:50,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:44423/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-28T00:39:50,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(2603): Flush status journal for 6acff8215866b10794f82068c000d15a: 2024-11-28T00:39:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:44423/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/output/cf/test_file to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/staging/jenkins__testExportFileSystemStateWithSplitRegion__h2dpsjkmb1rcl4aas66k6t5a0pt3in2ef8ibsfnm3523nacid4gq2ocpjq0fgb83/cf/test_file 2024-11-28T00:39:50,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/staging/jenkins__testExportFileSystemStateWithSplitRegion__h2dpsjkmb1rcl4aas66k6t5a0pt3in2ef8ibsfnm3523nacid4gq2ocpjq0fgb83/cf/test_file as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ 2024-11-28T00:39:50,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/staging/jenkins__testExportFileSystemStateWithSplitRegion__h2dpsjkmb1rcl4aas66k6t5a0pt3in2ef8ibsfnm3523nacid4gq2ocpjq0fgb83/cf/test_file into 6acff8215866b10794f82068c000d15a/cf as 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ - updating store file list. 2024-11-28T00:39:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:39:50,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ into 6acff8215866b10794f82068c000d15a/cf 2024-11-28T00:39:50,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/staging/jenkins__testExportFileSystemStateWithSplitRegion__h2dpsjkmb1rcl4aas66k6t5a0pt3in2ef8ibsfnm3523nacid4gq2ocpjq0fgb83/cf/test_file into 6acff8215866b10794f82068c000d15a/cf (new location: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_) 2024-11-28T00:39:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/staging/jenkins__testExportFileSystemStateWithSplitRegion__h2dpsjkmb1rcl4aas66k6t5a0pt3in2ef8ibsfnm3523nacid4gq2ocpjq0fgb83/cf/test_file 2024-11-28T00:39:50,153 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-28T00:39:50,153 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:39:50,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:50,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:50,153 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:39:50,153 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:50,155 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=97f25515df69:38585 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-28T00:39:50,156 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-28T00:39:50,156 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2 from cache 2024-11-28T00:39:50,157 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-11-28T00:39:50,162 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a., hostname=97f25515df69,38585,1732754368938, seqNum=2] 2024-11-28T00:39:50,169 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.3 split testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 
2024-11-28T00:39:50,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=97f25515df69,38585,1732754368938 2024-11-28T00:39:50,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=6acff8215866b10794f82068c000d15a, daughterA=22d943ab7918fe8b41b7f53e697ce458, daughterB=0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,181 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=6acff8215866b10794f82068c000d15a, daughterA=22d943ab7918fe8b41b7f53e697ce458, daughterB=0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,181 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=6acff8215866b10794f82068c000d15a, daughterA=22d943ab7918fe8b41b7f53e697ce458, daughterB=0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,181 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=6acff8215866b10794f82068c000d15a, daughterA=22d943ab7918fe8b41b7f53e697ce458, daughterB=0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:50,187 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, UNASSIGN}] 2024-11-28T00:39:50,189 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, UNASSIGN 2024-11-28T00:39:50,191 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=6acff8215866b10794f82068c000d15a, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:50,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, UNASSIGN because future has completed 2024-11-28T00:39:50,195 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:39:50,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:39:50,269 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=97f25515df69:43795 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-28T00:39:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:50,353 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:50,353 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:39:50,354 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 6acff8215866b10794f82068c000d15a, disabling compactions & flushes 2024-11-28T00:39:50,354 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:50,354 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:50,354 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. after waiting 0 ms 2024-11-28T00:39:50,354 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 2024-11-28T00:39:50,359 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-28T00:39:50,362 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:39:50,363 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a. 
2024-11-28T00:39:50,363 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 6acff8215866b10794f82068c000d15a: Waiting for close lock at 1732754390353Running coprocessor pre-close hooks at 1732754390353Disabling compacts and flushes for region at 1732754390354 (+1 ms)Disabling writes for close at 1732754390354Writing region close event to WAL at 1732754390355 (+1 ms)Running coprocessor post-close hooks at 1732754390359 (+4 ms)Closed at 1732754390362 (+3 ms) 2024-11-28T00:39:50,366 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:50,367 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=6acff8215866b10794f82068c000d15a, regionState=CLOSED 2024-11-28T00:39:50,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:39:50,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-28T00:39:50,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 6acff8215866b10794f82068c000d15a, server=97f25515df69,38585,1732754368938 in 176 msec 2024-11-28T00:39:50,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-28T00:39:50,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6acff8215866b10794f82068c000d15a, UNASSIGN in 187 msec 2024-11-28T00:39:50,401 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:50,406 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=6acff8215866b10794f82068c000d15a, threads=1 2024-11-28T00:39:50,410 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ for region: 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:50,427 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:39:50,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741864_1040 (size=21) 2024-11-28T00:39:50,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741864_1040 (size=21) 2024-11-28T00:39:50,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741864_1040 (size=21) 
2024-11-28T00:39:50,472 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:39:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:39:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:39:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:39:50,493 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ for region: 6acff8215866b10794f82068c000d15a 2024-11-28T00:39:50,496 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 6acff8215866b10794f82068c000d15a Daughter A: [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a] storefiles, Daughter B: [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a] storefiles. 
2024-11-28T00:39:50,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:39:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:39:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:39:50,525 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:50,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:39:50,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:39:50,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:39:50,550 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:50,564 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-28T00:39:50,572 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-28T00:39:50,581 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732754390579"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1732754390579"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1732754390579"}]},"ts":"1732754390579"} 2024-11-28T00:39:50,581 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732754390579"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754390579"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732754390579"}]},"ts":"1732754390579"} 2024-11-28T00:39:50,581 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732754390579"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754390579"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732754390579"}]},"ts":"1732754390579"} 2024-11-28T00:39:50,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, ASSIGN}] 2024-11-28T00:39:50,602 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, ASSIGN 2024-11-28T00:39:50,602 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, ASSIGN 2024-11-28T00:39:50,603 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, ASSIGN; state=SPLITTING_NEW, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:39:50,603 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, ASSIGN; state=SPLITTING_NEW, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:39:50,753 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:39:50,754 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=0d08c0085d18817c8abcbaffb1795b08, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:50,754 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=22d943ab7918fe8b41b7f53e697ce458, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:50,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, ASSIGN because future has completed 2024-11-28T00:39:50,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:39:50,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, ASSIGN because future has completed 2024-11-28T00:39:50,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:39:50,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:50,915 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:39:50,916 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d08c0085d18817c8abcbaffb1795b08, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.', STARTKEY => '5', ENDKEY => ''} 2024-11-28T00:39:50,916 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. service=AccessControlService 2024-11-28T00:39:50,916 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:39:50,916 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,917 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:50,917 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,917 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,919 INFO [StoreOpener-0d08c0085d18817c8abcbaffb1795b08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:50,920 INFO [StoreOpener-0d08c0085d18817c8abcbaffb1795b08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d08c0085d18817c8abcbaffb1795b08 columnFamilyName cf 2024-11-28T00:39:50,920 DEBUG [StoreOpener-0d08c0085d18817c8abcbaffb1795b08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:51,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:51,605 DEBUG [StoreFileOpener-0d08c0085d18817c8abcbaffb1795b08-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a: NONE, but ROW specified in column family configuration 2024-11-28T00:39:51,627 DEBUG [StoreOpener-0d08c0085d18817c8abcbaffb1795b08-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_-top 2024-11-28T00:39:51,628 INFO 
[StoreOpener-0d08c0085d18817c8abcbaffb1795b08-1 {}] regionserver.HStore(327): Store=0d08c0085d18817c8abcbaffb1795b08/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:51,629 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,630 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,632 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,633 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,633 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,636 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,638 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 0d08c0085d18817c8abcbaffb1795b08; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71631130, jitterRate=0.06738701462745667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:51,638 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:51,639 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 0d08c0085d18817c8abcbaffb1795b08: Running coprocessor pre-open hook at 1732754390917Writing region info on filesystem at 1732754390917Initializing all the Stores at 1732754390918 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754390918Cleaning up temporary data from old regions at 1732754391633 (+715 ms)Running coprocessor post-open hooks at 1732754391638 (+5 ms)Region opened successfully at 1732754391639 (+1 ms) 2024-11-28T00:39:51,641 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08., pid=26, masterSystemTime=1732754390911 2024-11-28T00:39:51,642 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.,because compaction is disabled. 2024-11-28T00:39:51,645 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:39:51,646 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:39:51,647 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=0d08c0085d18817c8abcbaffb1795b08, regionState=OPEN, openSeqNum=7, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:51,647 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:39:51,647 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 22d943ab7918fe8b41b7f53e697ce458, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.', STARTKEY => '', ENDKEY => '5'} 2024-11-28T00:39:51,647 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. service=AccessControlService 2024-11-28T00:39:51,647 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
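The region-open journal above prints the column family descriptor for 'cf', including BLOOMFILTER => 'ROW'; the recurring "HFile Bloom filter type ... NONE, but ROW specified in column family configuration" messages appear to reflect that the bulk-loaded store file (and the split references derived from it) carries no Bloom filter metadata even though the family requests one. For reference, a column family with that setting would be declared with the standard descriptor builders roughly as follows; this is an illustrative sketch, not the table-creation code used by the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CfWithRowBloom {
      public static void main(String[] args) {
        // 'cf' with BLOOMFILTER => 'ROW' and VERSIONS => '1', matching the descriptor
        // printed in the region-open journal above.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setBloomFilterType(BloomType.ROW)
            .setMaxVersions(1)
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(cf)
            .build();
        System.out.println(table);
      }
    }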
2024-11-28T00:39:51,648 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,648 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:39:51,648 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,648 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:39:51,650 INFO [StoreOpener-22d943ab7918fe8b41b7f53e697ce458-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,652 INFO [StoreOpener-22d943ab7918fe8b41b7f53e697ce458-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 22d943ab7918fe8b41b7f53e697ce458 columnFamilyName cf 2024-11-28T00:39:51,652 DEBUG [StoreOpener-22d943ab7918fe8b41b7f53e697ce458-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:51,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-28T00:39:51,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938 in 894 msec 2024-11-28T00:39:51,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, ASSIGN in 1.0570 sec 2024-11-28T00:39:51,670 DEBUG [StoreFileOpener-22d943ab7918fe8b41b7f53e697ce458-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a: NONE, but ROW specified in column family configuration 2024-11-28T00:39:51,675 DEBUG [StoreOpener-22d943ab7918fe8b41b7f53e697ce458-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_-bottom 2024-11-28T00:39:51,676 INFO [StoreOpener-22d943ab7918fe8b41b7f53e697ce458-1 {}] regionserver.HStore(327): Store=22d943ab7918fe8b41b7f53e697ce458/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:39:51,676 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,678 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,680 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,680 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,681 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,684 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,685 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 22d943ab7918fe8b41b7f53e697ce458; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72440141, jitterRate=0.07944221794605255}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:39:51,686 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:51,686 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 22d943ab7918fe8b41b7f53e697ce458: Running coprocessor pre-open hook at 1732754391648Writing region info on filesystem at 1732754391648Initializing all the Stores at 1732754391649 (+1 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754391649Cleaning up temporary data from old regions at 1732754391681 (+32 ms)Running coprocessor post-open hooks at 1732754391686 (+5 ms)Region opened successfully at 1732754391686 2024-11-28T00:39:51,687 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458., pid=27, masterSystemTime=1732754390911 2024-11-28T00:39:51,688 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.,because compaction is disabled. 2024-11-28T00:39:51,693 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:39:51,693 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:39:51,693 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=22d943ab7918fe8b41b7f53e697ce458, regionState=OPEN, openSeqNum=7, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:39:51,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:39:51,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-11-28T00:39:51,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, server=97f25515df69,38585,1732754368938 in 941 msec 2024-11-28T00:39:51,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=21 2024-11-28T00:39:51,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, ASSIGN in 1.1050 sec 2024-11-28T00:39:51,714 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=6acff8215866b10794f82068c000d15a, daughterA=22d943ab7918fe8b41b7f53e697ce458, daughterB=0d08c0085d18817c8abcbaffb1795b08 in 1.5350 sec 2024-11-28T00:39:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:39:52,328 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:52,328 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:39:52,334 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754392334 (current time:1732754392334). 2024-11-28T00:39:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:39:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:39:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:39:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2403ac6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:52,337 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:52,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:52,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:52,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49c07711, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:52,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:52,338 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,339 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52680, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:52,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b891da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:52,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:52,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:52,343 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52814, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:39:52,345 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:39:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,345 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:39:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8993d8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:39:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:39:52,347 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:39:52,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:39:52,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:39:52,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e828c2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:39:52,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:39:52,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,350 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:39:52,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d7be7c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:39:52,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:39:52,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:39:52,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:39:52,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
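The snapshot request logged above ({ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) is the kind of call a client makes through Admin.snapshot, which the master then executes as the SnapshotProcedure that follows (pid=28). A minimal sketch of the client side is shown below; it is not the test's code, and the connection handling is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a FLUSH-type snapshot; the master runs it with one
          // SnapshotRegionProcedure per online region, as seen below.
          admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
                         TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
                         SnapshotType.FLUSH);
        }
      }
    }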
2024-11-28T00:39:52,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:39:52,359 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:39:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:39:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:39:52,360 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:39:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:39:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:39:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:39:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-28T00:39:52,364 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:39:52,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:39:52,365 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:39:52,368 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:39:52,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:39:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:39:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:39:52,381 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:39:52,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
22d943ab7918fe8b41b7f53e697ce458}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d08c0085d18817c8abcbaffb1795b08}] 2024-11-28T00:39:52,382 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:52,383 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:52,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:39:52,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-11-28T00:39:52,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-11-28T00:39:52,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:39:52,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 22d943ab7918fe8b41b7f53e697ce458: 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 0d08c0085d18817c8abcbaffb1795b08: 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_-top] hfiles 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_-bottom] hfiles 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741869_1045 (size=182) 2024-11-28T00:39:52,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741869_1045 (size=182) 2024-11-28T00:39:52,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741869_1045 (size=182) 2024-11-28T00:39:52,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:39:52,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-11-28T00:39:52,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-11-28T00:39:52,552 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:52,552 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:39:52,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:39:52,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:39:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:39:52,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:39:52,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-28T00:39:52,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-11-28T00:39:52,556 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:52,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:39:52,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0d08c0085d18817c8abcbaffb1795b08 in 173 msec 2024-11-28T00:39:52,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-11-28T00:39:52,559 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:39:52,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22d943ab7918fe8b41b7f53e697ce458 in 176 msec 2024-11-28T00:39:52,561 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): 
Storing region-info for snapshot. 2024-11-28T00:39:52,561 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:39:52,561 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:39:52,562 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_] hfiles 2024-11-28T00:39:52,562 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ 2024-11-28T00:39:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:39:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:39:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:39:52,572 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 6acff8215866b10794f82068c000d15a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,573 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:39:52,574 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:39:52,575 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,576 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741872_1048 (size=891) 2024-11-28T00:39:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741872_1048 (size=891) 
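The export that begins a few lines below (TestExportSnapshot handing the completed snapshot to ExportSnapshot with an export-... destination path) is normally driven through the ExportSnapshot tool. A rough sketch of invoking it programmatically via ToolRunner is shown here, using the snapshot name and destination path that appear in the log; treat the surrounding code and exact option handling as assumptions rather than the test's own invocation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Command-line equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <dest>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679"
        });
        System.exit(rc);
      }
    }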
2024-11-28T00:39:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741872_1048 (size=891) 2024-11-28T00:39:52,594 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:39:52,602 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:39:52,603 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,605 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:39:52,605 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-28T00:39:52,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 244 msec 2024-11-28T00:39:52,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:39:52,679 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:39:52,679 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679 2024-11-28T00:39:52,680 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:52,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): 
inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:39:52,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,731 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:39:52,738 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:39:52,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:39:52,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:39:52,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:39:52,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:39:52,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:39:52,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:39:52,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:52,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:52,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:53,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-9293813044225198143.jar 2024-11-28T00:39:53,959 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:53,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,047 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-4151094006560512498.jar 2024-11-28T00:39:54,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,050 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,078 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:39:54,080 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:39:54,080 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:39:54,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:39:54,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:39:54,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:39:54,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:39:54,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:39:54,083 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:39:54,083 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:39:54,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:39:54,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:39:54,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:39:54,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:39:54,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:39:54,089 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:39:54,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:39:54,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:39:54,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:39:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741875_1051 (size=24020) 2024-11-28T00:39:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741875_1051 (size=24020) 2024-11-28T00:39:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741875_1051 (size=24020) 2024-11-28T00:39:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741876_1052 (size=77755) 2024-11-28T00:39:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741876_1052 (size=77755) 2024-11-28T00:39:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741876_1052 (size=77755) 2024-11-28T00:39:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741877_1053 (size=131360) 2024-11-28T00:39:54,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741877_1053 (size=131360) 2024-11-28T00:39:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741877_1053 (size=131360) 2024-11-28T00:39:54,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741878_1054 (size=111793) 2024-11-28T00:39:54,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741878_1054 (size=111793) 2024-11-28T00:39:54,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741878_1054 (size=111793) 2024-11-28T00:39:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741879_1055 (size=1832290) 2024-11-28T00:39:54,345 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741879_1055 (size=1832290) 2024-11-28T00:39:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741879_1055 (size=1832290) 2024-11-28T00:39:54,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741880_1056 (size=8360005) 2024-11-28T00:39:54,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741880_1056 (size=8360005) 2024-11-28T00:39:54,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741880_1056 (size=8360005) 2024-11-28T00:39:54,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741881_1057 (size=503880) 2024-11-28T00:39:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741881_1057 (size=503880) 2024-11-28T00:39:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741881_1057 (size=503880) 2024-11-28T00:39:54,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741882_1058 (size=6424743) 2024-11-28T00:39:54,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741882_1058 (size=6424743) 2024-11-28T00:39:54,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741882_1058 (size=6424743) 2024-11-28T00:39:54,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741883_1059 (size=322274) 2024-11-28T00:39:54,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741883_1059 (size=322274) 2024-11-28T00:39:54,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741883_1059 (size=322274) 2024-11-28T00:39:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741884_1060 (size=20406) 2024-11-28T00:39:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741884_1060 (size=20406) 2024-11-28T00:39:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741884_1060 (size=20406) 2024-11-28T00:39:54,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741885_1061 (size=45609) 2024-11-28T00:39:54,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741885_1061 (size=45609) 2024-11-28T00:39:54,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741885_1061 (size=45609) 2024-11-28T00:39:54,462 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741886_1062 (size=136454) 2024-11-28T00:39:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741886_1062 (size=136454) 2024-11-28T00:39:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741886_1062 (size=136454) 2024-11-28T00:39:54,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741887_1063 (size=1597136) 2024-11-28T00:39:54,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741887_1063 (size=1597136) 2024-11-28T00:39:54,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741887_1063 (size=1597136) 2024-11-28T00:39:54,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741888_1064 (size=30873) 2024-11-28T00:39:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741888_1064 (size=30873) 2024-11-28T00:39:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741888_1064 (size=30873) 2024-11-28T00:39:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741889_1065 (size=29229) 2024-11-28T00:39:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741889_1065 (size=29229) 2024-11-28T00:39:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741889_1065 (size=29229) 2024-11-28T00:39:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741890_1066 (size=903861) 2024-11-28T00:39:54,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741890_1066 (size=903861) 2024-11-28T00:39:54,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741890_1066 (size=903861) 2024-11-28T00:39:54,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741891_1067 (size=440956) 2024-11-28T00:39:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741891_1067 (size=440956) 2024-11-28T00:39:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741891_1067 (size=440956) 2024-11-28T00:39:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741892_1068 (size=5175431) 2024-11-28T00:39:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741892_1068 (size=5175431) 2024-11-28T00:39:54,573 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741892_1068 (size=5175431) 2024-11-28T00:39:54,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741893_1069 (size=232881) 2024-11-28T00:39:54,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741893_1069 (size=232881) 2024-11-28T00:39:54,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741893_1069 (size=232881) 2024-11-28T00:39:54,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741894_1070 (size=1323991) 2024-11-28T00:39:54,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741894_1070 (size=1323991) 2024-11-28T00:39:54,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741894_1070 (size=1323991) 2024-11-28T00:39:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741895_1071 (size=4695811) 2024-11-28T00:39:54,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741895_1071 (size=4695811) 2024-11-28T00:39:54,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741895_1071 (size=4695811) 2024-11-28T00:39:54,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741896_1072 (size=1877034) 2024-11-28T00:39:54,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741896_1072 (size=1877034) 2024-11-28T00:39:54,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741896_1072 (size=1877034) 2024-11-28T00:39:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741897_1073 (size=217555) 2024-11-28T00:39:54,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741897_1073 (size=217555) 2024-11-28T00:39:54,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741897_1073 (size=217555) 2024-11-28T00:39:54,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741898_1074 (size=4188619) 2024-11-28T00:39:54,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741898_1074 (size=4188619) 2024-11-28T00:39:54,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741898_1074 (size=4188619) 2024-11-28T00:39:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741899_1075 (size=127628) 
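
The TableMapReduceUtil entries above show ExportSnapshot staging its dependency jars into HDFS before submitting the MapReduce copy job. For reference, here is a sketch, under the assumption that ExportSnapshot can be driven as a standard Hadoop Tool (it is normally run as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot ... -copy-to ...), of launching the same export programmatically; the snapshot name and destination are copied from the log lines above, and the test's own wiring may differ.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Options mirror the documented ExportSnapshot command-line usage.
      int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
          "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
          "-copy-to", "hdfs://localhost:44423/user/jenkins/test-data/"
              + "d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679"
      });
      System.exit(rc);
    }
  }
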
2024-11-28T00:39:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741899_1075 (size=127628) 2024-11-28T00:39:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741899_1075 (size=127628) 2024-11-28T00:39:54,689 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:39:54,695 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-11-28T00:39:54,702 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=6acff8215866b10794f82068c000d15a-544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_. 2024-11-28T00:39:54,702 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=6acff8215866b10794f82068c000d15a-544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_. 2024-11-28T00:39:54,703 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-11-28T00:39:54,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:39:54,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:39:54,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:39:54,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:39:54,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:39:54,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:39:54,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741902_1078 (size=304131) 2024-11-28T00:39:54,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741902_1078 (size=304131) 2024-11-28T00:39:54,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741902_1078 (size=304131) 2024-11-28T00:39:55,454 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:39:55,454 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:39:55,601 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0001_000001 (auth:SIMPLE) from 127.0.0.1:39788 2024-11-28T00:39:55,965 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:39:56,878 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:40:04,128 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0001_000001 (auth:SIMPLE) from 127.0.0.1:35116 2024-11-28T00:40:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741903_1079 (size=349829) 2024-11-28T00:40:04,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741903_1079 (size=349829) 2024-11-28T00:40:04,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741903_1079 (size=349829) 2024-11-28T00:40:06,383 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0001_000001 (auth:SIMPLE) from 127.0.0.1:44080 2024-11-28T00:40:14,141 INFO [master/97f25515df69:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-28T00:40:14,141 INFO [master/97f25515df69:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-28T00:40:23,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5081119658eb6f560e50275c3a19a0ce, had cached 0 bytes from a total of 15717 2024-11-28T00:40:23,881 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5a5b4ebd73e242692e8c5b1b9f96994f, had cached 0 bytes from a total of 5978 2024-11-28T00:40:26,878 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:40:31,297 WARN [DataXceiver for client DFSClient_attempt_1732754375741_0001_m_000000_0_1606795823_1 at /127.0.0.1:43982 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741904_1080] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 660ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/, blockId=1073741904, seqno=1308 2024-11-28T00:40:31,421 WARN [DataXceiver for client DFSClient_attempt_1732754375741_0001_m_000000_0_1606795823_1 at /127.0.0.1:55522 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741904_1080] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 784ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/, blockId=1073741904, seqno=1308 2024-11-28T00:40:31,422 WARN [DataXceiver for client DFSClient_attempt_1732754375741_0001_m_000000_0_1606795823_1 at /127.0.0.1:41002 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741904_1080] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 784ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/, blockId=1073741904, seqno=1308 2024-11-28T00:40:33,259 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5081119658eb6f560e50275c3a19a0ce changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:40:33,260 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5a5b4ebd73e242692e8c5b1b9f96994f changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:40:33,260 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 47c99a40beabefa004915b0134843184 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:40:35,917 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0d08c0085d18817c8abcbaffb1795b08, had cached 0 bytes from a total of 320414712 2024-11-28T00:40:36,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 22d943ab7918fe8b41b7f53e697ce458, had cached 0 bytes from a total of 320414712 2024-11-28T00:40:42,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:40:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:40:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:40:56,879 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:41:08,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5081119658eb6f560e50275c3a19a0ce, had cached 0 bytes from a total of 15717 2024-11-28T00:41:08,881 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5a5b4ebd73e242692e8c5b1b9f96994f, had cached 0 bytes from a total of 5978 2024-11-28T00:41:14,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:41:14,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:41:14,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:41:20,917 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0d08c0085d18817c8abcbaffb1795b08, had cached 0 bytes from a total of 320414712 2024-11-28T00:41:21,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 22d943ab7918fe8b41b7f53e697ce458, had cached 0 bytes from a total of 320414712 2024-11-28T00:41:26,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:41:26,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:41:26,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:41:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:41:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:41:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:41:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:41:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:41:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:41:26,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:41:26,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:41:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:41:26,812 WARN 
[ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000002/launch_container.sh] 2024-11-28T00:41:26,812 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000002/container_tokens] 2024-11-28T00:41:26,812 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000002/sysfs] 2024-11-28T00:41:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741910_1086 (size=349829) 2024-11-28T00:41:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741910_1086 (size=349829) 2024-11-28T00:41:26,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741910_1086 (size=349829) 2024-11-28T00:41:26,841 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0001_000001 (auth:SIMPLE) from 127.0.0.1:54310 2024-11-28T00:41:26,879 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:41:28,815 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:41:28,816 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
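
Once the copy job finishes, the test lists both the source and the exported snapshot directories (the TestExportSnapshot DEBUG lines below) and expects to find the .snapshotinfo and data.manifest files. A minimal sketch of an equivalent check with the plain Hadoop FileSystem API, using the exported path recorded in the log; the class and structure here are illustrative, not the test's code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ListExportedSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path exported = new Path("hdfs://localhost:44423/user/jenkins/test-data/"
          + "d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679"
          + "/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion");
      FileSystem fs = exported.getFileSystem(conf);
      // Expect the snapshot descriptor and manifest listed in the log:
      // .snapshotinfo and data.manifest
      for (FileStatus st : fs.listStatus(exported)) {
        System.out.println(st.getPath());
      }
    }
  }
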
2024-11-28T00:41:28,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:28,823 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:41:28,824 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:41:28,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:28,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-28T00:41:28,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-28T00:41:28,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:28,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-28T00:41:28,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754392679/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-28T00:41:28,840 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:41:28,848 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754488848"}]},"ts":"1732754488848"} 2024-11-28T00:41:28,850 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
2024-11-28T00:41:28,850 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-28T00:41:28,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-11-28T00:41:28,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, UNASSIGN}] 2024-11-28T00:41:28,857 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, UNASSIGN 2024-11-28T00:41:28,857 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, UNASSIGN 2024-11-28T00:41:28,858 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=22d943ab7918fe8b41b7f53e697ce458, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:28,858 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=0d08c0085d18817c8abcbaffb1795b08, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:28,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, UNASSIGN because future has completed 2024-11-28T00:41:28,861 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:28,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:41:28,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, UNASSIGN because future has completed 2024-11-28T00:41:28,862 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:28,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938}] 
2024-11-28T00:41:28,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:41:29,015 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:41:29,015 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:29,015 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 0d08c0085d18817c8abcbaffb1795b08, disabling compactions & flushes 2024-11-28T00:41:29,015 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:41:29,015 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:41:29,015 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. after waiting 0 ms 2024-11-28T00:41:29,015 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 2024-11-28T00:41:29,020 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-28T00:41:29,021 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:29,021 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08. 
2024-11-28T00:41:29,021 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 0d08c0085d18817c8abcbaffb1795b08: Waiting for close lock at 1732754489015Running coprocessor pre-close hooks at 1732754489015Disabling compacts and flushes for region at 1732754489015Disabling writes for close at 1732754489015Writing region close event to WAL at 1732754489016 (+1 ms)Running coprocessor post-close hooks at 1732754489021 (+5 ms)Closed at 1732754489021 2024-11-28T00:41:29,023 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:41:29,024 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:41:29,024 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:29,024 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 22d943ab7918fe8b41b7f53e697ce458, disabling compactions & flushes 2024-11-28T00:41:29,024 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:41:29,024 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:41:29,024 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. after waiting 0 ms 2024-11-28T00:41:29,024 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 
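
The remaining entries record the test's teardown: the DisableTableProcedure (pid=31) closes both daughter regions and marks the table DISABLED, after which a DeleteTableProcedure archives the region HFiles. A minimal client-side sketch of that teardown sequence via the Admin API follows; the table name comes from the log, the rest is illustrative and not the test's actual code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DropTableSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        if (admin.isTableEnabled(table)) {
          // DisableTableProcedure: regions are unassigned and closed,
          // the table state in hbase:meta moves DISABLING -> DISABLED.
          admin.disableTable(table);
        }
        // DeleteTableProcedure: region directories are moved to the archive
        // and the table's meta rows and permissions are removed.
        admin.deleteTable(table);
      }
    }
  }
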
2024-11-28T00:41:29,024 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=0d08c0085d18817c8abcbaffb1795b08, regionState=CLOSED 2024-11-28T00:41:29,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:29,029 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-28T00:41:29,030 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:29,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-11-28T00:41:29,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 0d08c0085d18817c8abcbaffb1795b08, server=97f25515df69,38585,1732754368938 in 166 msec 2024-11-28T00:41:29,030 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458. 2024-11-28T00:41:29,030 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 22d943ab7918fe8b41b7f53e697ce458: Waiting for close lock at 1732754489024Running coprocessor pre-close hooks at 1732754489024Disabling compacts and flushes for region at 1732754489024Disabling writes for close at 1732754489024Writing region close event to WAL at 1732754489025 (+1 ms)Running coprocessor post-close hooks at 1732754489030 (+5 ms)Closed at 1732754489030 2024-11-28T00:41:29,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0d08c0085d18817c8abcbaffb1795b08, UNASSIGN in 174 msec 2024-11-28T00:41:29,032 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:41:29,033 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=22d943ab7918fe8b41b7f53e697ce458, regionState=CLOSED 2024-11-28T00:41:29,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:29,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-28T00:41:29,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 22d943ab7918fe8b41b7f53e697ce458, 
server=97f25515df69,38585,1732754368938 in 175 msec 2024-11-28T00:41:29,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-28T00:41:29,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=22d943ab7918fe8b41b7f53e697ce458, UNASSIGN in 182 msec 2024-11-28T00:41:29,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-11-28T00:41:29,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 191 msec 2024-11-28T00:41:29,046 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754489045"}]},"ts":"1732754489045"} 2024-11-28T00:41:29,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-28T00:41:29,047 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-28T00:41:29,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 206 msec 2024-11-28T00:41:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:41:29,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:41:29,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,177 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,179 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,182 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,188 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a 2024-11-28T00:41:29,188 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:41:29,188 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:41:29,192 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/recovered.edits] 2024-11-28T00:41:29,192 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/recovered.edits] 2024-11-28T00:41:29,192 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/recovered.edits] 2024-11-28T00:41:29,199 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a 2024-11-28T00:41:29,199 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_.6acff8215866b10794f82068c000d15a 2024-11-28T00:41:29,199 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/cf/544c5991ff2b470e99c6d7a2b23b5c9b_SeqId_4_ 2024-11-28T00:41:29,203 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/recovered.edits/10.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08/recovered.edits/10.seqid 2024-11-28T00:41:29,203 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/recovered.edits/6.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a/recovered.edits/6.seqid 2024-11-28T00:41:29,204 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/0d08c0085d18817c8abcbaffb1795b08 2024-11-28T00:41:29,204 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/recovered.edits/10.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458/recovered.edits/10.seqid 2024-11-28T00:41:29,204 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/6acff8215866b10794f82068c000d15a 2024-11-28T00:41:29,205 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportFileSystemStateWithSplitRegion/22d943ab7918fe8b41b7f53e697ce458 2024-11-28T00:41:29,205 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-11-28T00:41:29,207 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-28T00:41:29,216 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-28T00:41:29,277 DEBUG [pool-75-thread-1-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,277 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,278 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,278 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,278 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,279 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-28T00:41:29,280 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,281 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-11-28T00:41:29,281 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754489281"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,281 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754489281"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,281 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754489281"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,284 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-11-28T00:41:29,284 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6acff8215866b10794f82068c000d15a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754381620.6acff8215866b10794f82068c000d15a.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 22d943ab7918fe8b41b7f53e697ce458, NAME => 'testExportFileSystemStateWithSplitRegion,,1732754390175.22d943ab7918fe8b41b7f53e697ce458.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 0d08c0085d18817c8abcbaffb1795b08, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732754390175.0d08c0085d18817c8abcbaffb1795b08.', STARTKEY => '5', ENDKEY => ''}] 2024-11-28T00:41:29,285 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
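The procedure entries above record the master-side handling of the client's disable and delete RPCs (DisableTableProcedure pid=31 followed by DeleteTableProcedure pid=37 for testExportFileSystemStateWithSplitRegion); the same flow repeats below for testtb-testExportFileSystemStateWithSplitRegion. A minimal client-side sketch of issuing that sequence through the public HBase Admin API — the connection setup and hard-coded table name here are illustrative assumptions, not values taken from this run — could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Hypothetical table name; substitute the table under test.
          TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              // Master runs a DisableTableProcedure: regions are unassigned and the
              // table state in hbase:meta moves DISABLING -> DISABLED.
              admin.disableTable(table);
            }
            // Master runs a DeleteTableProcedure: region directories are archived,
            // meta rows are removed, and the table descriptor is dropped.
            admin.deleteTable(table);
          }
        }
      }
    }

Both blocking Admin calls return only after the corresponding master procedure completes, which matches the ordering in this log: the client-side "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines appear only after the final ProcedureExecutor "Finished pid=..." entries.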
2024-11-28T00:41:29,285 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754489285"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,287 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-11-28T00:41:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,287 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,287 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,289 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-28T00:41:29,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:29,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:29,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:29,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:29,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 117 msec 2024-11-28T00:41:29,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-28T00:41:29,399 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,399 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:41:29,400 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:41:29,403 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754489403"}]},"ts":"1732754489403"} 2024-11-28T00:41:29,405 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-28T00:41:29,405 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-28T00:41:29,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-11-28T00:41:29,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, 
region=5081119658eb6f560e50275c3a19a0ce, UNASSIGN}] 2024-11-28T00:41:29,409 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, UNASSIGN 2024-11-28T00:41:29,409 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, UNASSIGN 2024-11-28T00:41:29,409 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5081119658eb6f560e50275c3a19a0ce, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:29,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=5a5b4ebd73e242692e8c5b1b9f96994f, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:29,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, UNASSIGN because future has completed 2024-11-28T00:41:29,412 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:29,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:41:29,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, UNASSIGN because future has completed 2024-11-28T00:41:29,413 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:29,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:41:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:41:29,565 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:41:29,565 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 5081119658eb6f560e50275c3a19a0ce, disabling compactions & flushes 2024-11-28T00:41:29,566 INFO 
[RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. after waiting 0 ms 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:41:29,566 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 5a5b4ebd73e242692e8c5b1b9f96994f, disabling compactions & flushes 2024-11-28T00:41:29,566 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:41:29,566 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 2024-11-28T00:41:29,567 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. after waiting 1 ms 2024-11-28T00:41:29,567 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
2024-11-28T00:41:29,571 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:41:29,571 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:41:29,571 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:29,571 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce. 2024-11-28T00:41:29,571 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:29,571 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 5081119658eb6f560e50275c3a19a0ce: Waiting for close lock at 1732754489566Running coprocessor pre-close hooks at 1732754489566Disabling compacts and flushes for region at 1732754489566Disabling writes for close at 1732754489566Writing region close event to WAL at 1732754489567 (+1 ms)Running coprocessor post-close hooks at 1732754489571 (+4 ms)Closed at 1732754489571 2024-11-28T00:41:29,571 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f. 
2024-11-28T00:41:29,572 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 5a5b4ebd73e242692e8c5b1b9f96994f: Waiting for close lock at 1732754489566Running coprocessor pre-close hooks at 1732754489566Disabling compacts and flushes for region at 1732754489566Disabling writes for close at 1732754489567 (+1 ms)Writing region close event to WAL at 1732754489567Running coprocessor post-close hooks at 1732754489571 (+4 ms)Closed at 1732754489571 2024-11-28T00:41:29,573 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:41:29,574 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=5a5b4ebd73e242692e8c5b1b9f96994f, regionState=CLOSED 2024-11-28T00:41:29,574 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:41:29,575 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5081119658eb6f560e50275c3a19a0ce, regionState=CLOSED 2024-11-28T00:41:29,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:29,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:41:29,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-11-28T00:41:29,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 5a5b4ebd73e242692e8c5b1b9f96994f, server=97f25515df69,38585,1732754368938 in 165 msec 2024-11-28T00:41:29,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-28T00:41:29,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 5081119658eb6f560e50275c3a19a0ce, server=97f25515df69,34347,1732754369169 in 167 msec 2024-11-28T00:41:29,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5a5b4ebd73e242692e8c5b1b9f96994f, UNASSIGN in 172 msec 2024-11-28T00:41:29,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-11-28T00:41:29,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5081119658eb6f560e50275c3a19a0ce, UNASSIGN in 173 msec 2024-11-28T00:41:29,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-28T00:41:29,585 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 177 msec 2024-11-28T00:41:29,586 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754489586"}]},"ts":"1732754489586"} 2024-11-28T00:41:29,588 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-28T00:41:29,588 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-28T00:41:29,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 189 msec 2024-11-28T00:41:29,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:41:29,718 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:41:29,718 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,720 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,721 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,724 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,726 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:41:29,727 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:41:29,729 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/recovered.edits] 2024-11-28T00:41:29,729 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/recovered.edits] 2024-11-28T00:41:29,733 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/cf/c743017cf6d048c7a911df243d06539f 2024-11-28T00:41:29,733 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/cf/8ede43c4adfc416986840db283588899 2024-11-28T00:41:29,737 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce/recovered.edits/9.seqid 2024-11-28T00:41:29,737 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f/recovered.edits/9.seqid 2024-11-28T00:41:29,737 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:41:29,738 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSplitRegion/5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:41:29,738 
DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-11-28T00:41:29,738 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-28T00:41:29,739 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-11-28T00:41:29,743 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128af8fff7502d14474b67847a75c20d802_5081119658eb6f560e50275c3a19a0ce 2024-11-28T00:41:29,745 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024112822f33b1ccdc3490abd2d78fb3eedb947_5a5b4ebd73e242692e8c5b1b9f96994f 2024-11-28T00:41:29,745 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-28T00:41:29,748 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,751 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-28T00:41:29,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,782 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,783 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,783 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,783 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,783 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:41:29,785 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-28T00:41:29,787 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,787 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-11-28T00:41:29,787 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754489787"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,787 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754489787"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,791 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:41:29,791 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5a5b4ebd73e242692e8c5b1b9f96994f, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732754378365.5a5b4ebd73e242692e8c5b1b9f96994f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5081119658eb6f560e50275c3a19a0ce, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732754378365.5081119658eb6f560e50275c3a19a0ce.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:41:29,791 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
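Before the region directories are removed from data/default/, the HFileArchiver entries above show every store file, recovered.edits file, and MOB file being moved under the cluster's archive root (.../archive/data/default/<table>/...). A short sketch for inspecting such an archive location with the Hadoop FileSystem API — the path is passed in as an argument because the real root in this run (hdfs://localhost:44423/user/jenkins/test-data/.../archive/data/default/) is specific to the test; treat it as a placeholder — might be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // args[0]: archive directory to inspect, e.g. .../archive/data/default/<table>
        Path archived = new Path(args[0]);
        try (FileSystem fs = FileSystem.get(archived.toUri(), conf)) {
          RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true); // recursive walk
          while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
          }
        }
      }
    }

The archive mirrors the source layout (archive/data/default/<table>/<encoded-region>/<family>/<file>), so the same recursive walk also covers the mobdir files archived above.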
2024-11-28T00:41:29,791 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754489791"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,793 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,793 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:29,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-28T00:41:29,795 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-11-28T00:41:29,796 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,798 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 78 msec 2024-11-28T00:41:29,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-28T00:41:29,898 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,898 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:41:29,914 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:41:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,919 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:41:29,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,924 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:41:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:29,953 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=756 (was 717) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1406 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:36900 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:35520 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:46161 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 123034) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1348172987_1 at /127.0.0.1:35500 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:60226 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46161 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=806 (was 778) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=529 (was 416) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=1300 (was 6765) 2024-11-28T00:41:29,954 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=756 is superior to 500 2024-11-28T00:41:29,971 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=756, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=529, ProcessCount=20, AvailableMemoryMB=1299 2024-11-28T00:41:29,972 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=756 is superior to 500 2024-11-28T00:41:29,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:41:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:29,976 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:41:29,977 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-11-28T00:41:29,977 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:41:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:41:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:41:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:41:29,988 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:41:29,990 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f7dd65dc1d931624811654da1d7eeaa0, NAME => 'testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:29,990 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ed0afa718b8fe519f06c5ee2df5fd2d3, NAME => 'testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:30,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:41:30,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:41:30,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:41:30,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:30,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing ed0afa718b8fe519f06c5ee2df5fd2d3, disabling compactions & flushes 2024-11-28T00:41:30,002 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. after waiting 0 ms 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,002 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:41:30,002 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for ed0afa718b8fe519f06c5ee2df5fd2d3: Waiting for close lock at 1732754490002Disabling compacts and flushes for region at 1732754490002Disabling writes for close at 1732754490002Writing region close event to WAL at 1732754490002Closed at 1732754490002 2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing f7dd65dc1d931624811654da1d7eeaa0, disabling compactions & flushes 2024-11-28T00:41:30,003 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. after waiting 0 ms 2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,003 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
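The create-table records above show the table descriptor for 'testtb-testExportWithTargetName': a single family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and two regions split at key '1'. As a hedged sketch (not the test's own code), the same table could be created through the public HBase client API; the cluster configuration is assumed to come from an hbase-site.xml on the classpath, and only the table, family, and split-key names are taken from the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Column family 'cf': MOB enabled with threshold 0 and a single version,
      // mirroring the attributes in the create request above.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(cf)
          .build();
      // One split key ('1') yields the two regions seen in the log:
      // [ '', '1' ) and [ '1', '' ).
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}

With MOB_THRESHOLD set to 0, any value written to 'cf' exceeds the threshold and is stored as a medium object, which is why the descriptor carries IS_MOB => 'true' throughout the log.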
2024-11-28T00:41:30,003 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for f7dd65dc1d931624811654da1d7eeaa0: Waiting for close lock at 1732754490003Disabling compacts and flushes for region at 1732754490003Disabling writes for close at 1732754490003Writing region close event to WAL at 1732754490003Closed at 1732754490003 2024-11-28T00:41:30,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:41:30,005 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732754490005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754490005"}]},"ts":"1732754490005"} 2024-11-28T00:41:30,005 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732754490005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754490005"}]},"ts":"1732754490005"} 2024-11-28T00:41:30,007 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:41:30,008 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:41:30,009 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754490009"}]},"ts":"1732754490009"} 2024-11-28T00:41:30,011 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-28T00:41:30,011 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:41:30,013 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:41:30,013 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:41:30,013 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:41:30,013 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:41:30,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, ASSIGN}] 2024-11-28T00:41:30,014 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, ASSIGN 2024-11-28T00:41:30,014 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, ASSIGN 2024-11-28T00:41:30,015 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:41:30,015 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:41:30,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:41:30,166 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:41:30,166 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ed0afa718b8fe519f06c5ee2df5fd2d3, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:30,166 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=f7dd65dc1d931624811654da1d7eeaa0, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:30,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, ASSIGN because future has completed 2024-11-28T00:41:30,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:41:30,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, ASSIGN because future has completed 2024-11-28T00:41:30,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:41:30,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:41:30,324 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => f7dd65dc1d931624811654da1d7eeaa0, NAME => 'testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:41:30,325 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => ed0afa718b8fe519f06c5ee2df5fd2d3, NAME => 'testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. service=AccessControlService 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
service=AccessControlService 2024-11-28T00:41:30,325 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:41:30,325 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:30,325 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:30,326 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,326 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,326 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,326 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,327 INFO [StoreOpener-f7dd65dc1d931624811654da1d7eeaa0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,327 INFO [StoreOpener-ed0afa718b8fe519f06c5ee2df5fd2d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,329 INFO [StoreOpener-f7dd65dc1d931624811654da1d7eeaa0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7dd65dc1d931624811654da1d7eeaa0 columnFamilyName cf 2024-11-28T00:41:30,329 INFO [StoreOpener-ed0afa718b8fe519f06c5ee2df5fd2d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed0afa718b8fe519f06c5ee2df5fd2d3 columnFamilyName cf 2024-11-28T00:41:30,330 DEBUG [StoreOpener-ed0afa718b8fe519f06c5ee2df5fd2d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:30,330 DEBUG [StoreOpener-f7dd65dc1d931624811654da1d7eeaa0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:30,330 INFO [StoreOpener-ed0afa718b8fe519f06c5ee2df5fd2d3-1 {}] regionserver.HStore(327): Store=ed0afa718b8fe519f06c5ee2df5fd2d3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:30,330 INFO [StoreOpener-f7dd65dc1d931624811654da1d7eeaa0-1 {}] regionserver.HStore(327): Store=f7dd65dc1d931624811654da1d7eeaa0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:30,330 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,331 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,331 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,331 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,332 DEBUG 
[RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,332 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,332 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,332 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,332 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,332 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,333 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,334 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,335 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:30,335 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:30,336 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened f7dd65dc1d931624811654da1d7eeaa0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64904505, jitterRate=-0.032847508788108826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:30,336 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened ed0afa718b8fe519f06c5ee2df5fd2d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65377662, jitterRate=-0.02579692006111145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:30,336 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, 
pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,336 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,337 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for ed0afa718b8fe519f06c5ee2df5fd2d3: Running coprocessor pre-open hook at 1732754490326Writing region info on filesystem at 1732754490326Initializing all the Stores at 1732754490327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754490327Cleaning up temporary data from old regions at 1732754490332 (+5 ms)Running coprocessor post-open hooks at 1732754490336 (+4 ms)Region opened successfully at 1732754490337 (+1 ms) 2024-11-28T00:41:30,337 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for f7dd65dc1d931624811654da1d7eeaa0: Running coprocessor pre-open hook at 1732754490326Writing region info on filesystem at 1732754490326Initializing all the Stores at 1732754490327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754490327Cleaning up temporary data from old regions at 1732754490332 (+5 ms)Running coprocessor post-open hooks at 1732754490336 (+4 ms)Region opened successfully at 1732754490337 (+1 ms) 2024-11-28T00:41:30,337 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0., pid=48, masterSystemTime=1732754490321 2024-11-28T00:41:30,337 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3., pid=49, masterSystemTime=1732754490322 2024-11-28T00:41:30,339 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,339 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
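Around here the two regions (f7dd65dc1d931624811654da1d7eeaa0 and ed0afa718b8fe519f06c5ee2df5fd2d3) are brought online by RS_OPEN_REGION handlers on their assigned region servers. From a client's point of view the same assignment can be observed through Admin and RegionLocator; a minimal sketch under the same classpath-configuration assumption, with the polling loop purely illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignmentSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         RegionLocator locator = connection.getRegionLocator(table)) {
      // Poll until the master reports every region of the table as open.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // Print where each region landed, analogous to the OPEN/openSeqNum
      // rows written to hbase:meta in the records above.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getRegionNameAsString()
            + " -> " + location.getServerName());
      }
    }
  }
}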
2024-11-28T00:41:30,340 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=f7dd65dc1d931624811654da1d7eeaa0, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:30,340 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,340 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ed0afa718b8fe519f06c5ee2df5fd2d3, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:30,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:30,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:41:30,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-11-28T00:41:30,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938 in 174 msec 2024-11-28T00:41:30,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, ASSIGN in 332 msec 2024-11-28T00:41:30,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-11-28T00:41:30,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169 in 175 msec 2024-11-28T00:41:30,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-11-28T00:41:30,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, ASSIGN in 333 msec 2024-11-28T00:41:30,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:41:30,350 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754490350"}]},"ts":"1732754490350"} 2024-11-28T00:41:30,352 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-28T00:41:30,353 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:41:30,354 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-28T00:41:30,357 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:41:30,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:30,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:30,403 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:30,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:30,414 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:30,414 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:30,414 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:30,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 441 msec 2024-11-28T00:41:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:41:30,608 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:41:30,608 DEBUG [Time-limited 
test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,611 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-28T00:41:30,611 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,611 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:30,613 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,619 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,625 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,628 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:41:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754490628 (current time:1732754490628). 
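The last record above is the master receiving the snapshot request { ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }. On the client side such a request is issued with Admin.snapshot; a minimal sketch, again assuming cluster configuration on the classpath, with only the snapshot and table names taken from the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A FLUSH-type snapshot flushes online regions before capturing file
      // references, matching the "type=FLUSH" descriptor in the request above.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}

The synchronous call returns once the master-side SnapshotProcedure completes, which matches the repeated "Checking to see if procedure is done pid=50" records that follow.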
2024-11-28T00:41:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:41:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-28T00:41:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:41:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62958d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:30,630 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@679f8939, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:30,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:30,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,632 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44624, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:30,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ab01a3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:30,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:30,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:30,634 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55606, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:30,636 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:41:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
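The short-lived connection opened and closed above comes from SnapshotDescriptionUtils.isSecurityAvailable, and the records that follow read the table ACL written at create time (entry [jenkins: RWXCA], i.e. READ, WRITE, EXEC, CREATE, ADMIN) so it can be carried in the snapshot description. A grant of that shape can be issued from a client with AccessControlClient; a hedged sketch, assuming the AccessController coprocessor is installed as it is in this secure-export test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionSketch {
  // AccessControlClient declares 'throws Throwable', so it is propagated here.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Grant R/W/X/C/A on the whole table (family and qualifier left null),
      // the same shape as the PermissionStorage entry for user 'jenkins' above.
      AccessControlClient.grant(connection,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins",
          null,   // column family: null = entire table
          null,   // qualifier
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}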
2024-11-28T00:41:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5887d5db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:30,637 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3882620f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:30,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,639 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44654, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:30,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@645fd2ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:30,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:30,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:30,642 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:41:30,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:41:30,646 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:41:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:30,646 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:41:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:41:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:41:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-28T00:41:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:41:30,649 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:41:30,650 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:41:30,652 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:41:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:41:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:41:30,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:41:30,660 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:41:30,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3}] 2024-11-28T00:41:30,661 
INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,662 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:41:30,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-11-28T00:41:30,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for f7dd65dc1d931624811654da1d7eeaa0: 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for ed0afa718b8fe519f06c5ee2df5fd2d3: 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. for emptySnaptb0-testExportWithTargetName completed. 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. for emptySnaptb0-testExportWithTargetName completed. 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:30,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:30,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:41:30,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:41:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:41:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:41:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:41:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:41:30,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:41:30,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:41:30,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:30,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
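The entries above trace a FLUSH-type snapshot (emptySnaptb0-testExportWithTargetName) through the master's SnapshotProcedure and the per-region SnapshotRegionCallable work. For reference only, a minimal client-side sketch of requesting such a snapshot is shown below; it assumes a reachable cluster configured via hbase-site.xml and is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A FLUSH snapshot flushes each region before capturing it, matching "type=FLUSH" in the log.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH));
    }
  }
}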
2024-11-28T00:41:30,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-11-28T00:41:30,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-28T00:41:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-11-28T00:41:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-11-28T00:41:30,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,828 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,828 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:30,828 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:30,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 in 169 msec 2024-11-28T00:41:30,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-11-28T00:41:30,831 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:41:30,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 in 169 msec 2024-11-28T00:41:30,832 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:41:30,833 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:41:30,833 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:41:30,833 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:30,834 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:41:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:41:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:41:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:41:30,842 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:41:30,842 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:30,843 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:30,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:41:30,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:41:30,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:41:30,858 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:41:30,863 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:41:30,864 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:30,865 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:41:30,865 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-28T00:41:30,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 219 msec 2024-11-28T00:41:30,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:41:30,968 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:41:30,976 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:30,979 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:30,981 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,988 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-28T00:41:30,988 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
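The HRegion(8528) warning above ("writing data to region ... with WAL disabled") is most commonly produced by client writes whose durability is SKIP_WAL. A hedged sketch of such a write follows; the row and value literals are made up and the table name is taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      put.setDurability(Durability.SKIP_WAL);  // skips the WAL, hence the "Data may be lost" warning
      table.put(put);
    }
  }
}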
2024-11-28T00:41:30,988 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:30,992 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:30,998 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:31,004 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:41:31,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:41:31,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754491007 (current time:1732754491007). 2024-11-28T00:41:31,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:41:31,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-28T00:41:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:41:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35259a66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:31,009 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:31,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:31,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:31,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1128df78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-28T00:41:31,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:31,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:31,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,011 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:31,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@680994c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:31,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:31,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:31,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:31,014 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55628, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:31,015 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
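The repeated "Checking to see if procedure is done pid=..." entries are the master answering the client's completion polls for the snapshot procedure. A small sketch of that polling loop using the public Admin API is below; the poll interval is an assumption, not something shown in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class SnapshotWait {
  // Blocks until the named snapshot of the given table is reported finished by the master.
  static void waitForSnapshot(Admin admin, String name, TableName table) throws Exception {
    SnapshotDescription description = new SnapshotDescription(name, table);
    while (!admin.isSnapshotFinished(description)) {
      Thread.sleep(100L);  // arbitrary poll interval chosen for this sketch
    }
  }
}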
2024-11-28T00:41:31,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:31,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,015 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec4cb3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:31,017 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:31,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:31,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:31,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cdf797e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:31,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:31,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:31,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,018 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:31,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7501f0c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:31,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:31,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:31,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:31,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55638, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:31,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:41:31,027 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
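The locator entry above resolves the hbase:acl table, and the surrounding PermissionStorage entries show the grant "jenkins: RWXCA" being read for the table. A sketch of inspecting the same grants from a client with AccessControlClient follows; passing the table name as the regex argument is an assumption of this example.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {  // AccessControlClient declares throws Throwable
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(connection, "testtb-testExportWithTargetName");
      perms.forEach(p -> System.out.println(p));  // expect an entry like "jenkins: RWXCA"
    }
  }
}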
2024-11-28T00:41:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:41:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:41:31,030 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
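"No existing snapshot, attempting snapshot..." is the master's duplicate check before storing the procedure. A client-side sketch of the equivalent housekeeping, listing snapshots and deleting a stale one with the same name before retaking it, is shown below; no delete actually happens in this run.

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class SnapshotHousekeeping {
  // Deletes a snapshot with the given name if one already exists.
  static void deleteIfPresent(Admin admin, String snapshotName) throws Exception {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    for (SnapshotDescription snapshot : snapshots) {
      if (snapshot.getName().equals(snapshotName)) {
        admin.deleteSnapshot(snapshotName);  // shown for completeness; not taken in the run above
      }
    }
  }
}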
2024-11-28T00:41:31,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:41:31,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-28T00:41:31,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:41:31,031 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:41:31,032 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:41:31,034 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:41:31,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:41:31,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:41:31,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:41:31,042 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:41:31,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3}] 2024-11-28T00:41:31,043 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:31,043 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:31,056 WARN [regionserver/97f25515df69:0.Chore.1 {}] 
hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-11-28T00:41:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:41:31,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-11-28T00:41:31,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-11-28T00:41:31,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:31,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:31,195 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing ed0afa718b8fe519f06c5ee2df5fd2d3 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:41:31,195 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing f7dd65dc1d931624811654da1d7eeaa0 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:41:31,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 is 71, key is 03220a5fea7a90876c9356a159365704/cf:q/1732754490976/Put/seqid=0 2024-11-28T00:41:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 is 71, key is 10d28dd32d91ba20947d4d1c26abadae/cf:q/1732754490978/Put/seqid=0 2024-11-28T00:41:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741920_1096 (size=5102) 2024-11-28T00:41:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741920_1096 (size=5102) 2024-11-28T00:41:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741920_1096 (size=5102) 2024-11-28T00:41:31,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:31,226 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741921_1097 (size=8171) 2024-11-28T00:41:31,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741921_1097 (size=8171) 2024-11-28T00:41:31,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741921_1097 (size=8171) 2024-11-28T00:41:31,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:31,229 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/.tmp/cf/f9c682b572d14d0fbfad57ae80c88d89, store: [table=testtb-testExportWithTargetName family=cf region=f7dd65dc1d931624811654da1d7eeaa0] 2024-11-28T00:41:31,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/.tmp/cf/f9c682b572d14d0fbfad57ae80c88d89 is 208, key is 0f73f6eec7b858f5a285e6eebc14f7740/cf:q/1732754490976/Put/seqid=0 2024-11-28T00:41:31,232 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:31,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/.tmp/cf/34e111a4619b4bc39650587d08103418, store: [table=testtb-testExportWithTargetName family=cf region=ed0afa718b8fe519f06c5ee2df5fd2d3] 
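The mobdir paths, HMobStore renames, and DefaultMobStoreFlusher entries above indicate that family 'cf' of this table is MOB-enabled. A hedged sketch of creating such a family is below; the threshold value is an assumption and is not taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTable {
  // Creates a table whose single family stores large values in the MOB area (mobdir).
  static void create(Admin admin) throws Exception {
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)       // values above the threshold are written under mobdir
            .setMobThreshold(102400L)  // assumed threshold, not visible in the log
            .build())
        .build());
  }
}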
2024-11-28T00:41:31,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/.tmp/cf/34e111a4619b4bc39650587d08103418 is 208, key is 1d0c1ef84023829de6839103e36ad45c7/cf:q/1732754490978/Put/seqid=0 2024-11-28T00:41:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741922_1098 (size=5914) 2024-11-28T00:41:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741922_1098 (size=5914) 2024-11-28T00:41:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741922_1098 (size=5914) 2024-11-28T00:41:31,243 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/.tmp/cf/f9c682b572d14d0fbfad57ae80c88d89 2024-11-28T00:41:31,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741923_1099 (size=14949) 2024-11-28T00:41:31,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741923_1099 (size=14949) 2024-11-28T00:41:31,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741923_1099 (size=14949) 2024-11-28T00:41:31,252 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/.tmp/cf/34e111a4619b4bc39650587d08103418 2024-11-28T00:41:31,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/.tmp/cf/f9c682b572d14d0fbfad57ae80c88d89 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89 2024-11-28T00:41:31,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/.tmp/cf/34e111a4619b4bc39650587d08103418 as 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418 2024-11-28T00:41:31,259 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:41:31,260 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for f7dd65dc1d931624811654da1d7eeaa0 in 65ms, sequenceid=6, compaction requested=false 2024-11-28T00:41:31,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-28T00:41:31,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for f7dd65dc1d931624811654da1d7eeaa0: 2024-11-28T00:41:31,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. for snaptb0-testExportWithTargetName completed. 2024-11-28T00:41:31,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:41:31,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:31,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89] hfiles 2024-11-28T00:41:31,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89 for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:41:31,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418, entries=47, sequenceid=6, filesize=14.6 K 2024-11-28T00:41:31,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for ed0afa718b8fe519f06c5ee2df5fd2d3 in 72ms, sequenceid=6, compaction requested=false 2024-11-28T00:41:31,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for ed0afa718b8fe519f06c5ee2df5fd2d3: 2024-11-28T00:41:31,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. for snaptb0-testExportWithTargetName completed. 2024-11-28T00:41:31,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:41:31,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:31,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418] hfiles 2024-11-28T00:41:31,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418 for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:41:31,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:41:31,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:41:31,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:41:31,271 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
2024-11-28T00:41:31,271 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-28T00:41:31,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-11-28T00:41:31,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:31,272 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:31,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7dd65dc1d931624811654da1d7eeaa0 in 231 msec 2024-11-28T00:41:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:41:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:41:31,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:41:31,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 
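The "RawAsyncHBaseAdmin$TableProcedureBiConsumer ... Operation: SNAPSHOT ... completed" entries in this log come from the asynchronous admin reporting the finished procedure. A minimal sketch of that async path, assuming the same cluster configuration, looks like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshot {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      conn.getAdmin()
          .snapshot(new SnapshotDescription("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"), SnapshotType.FLUSH))
          .get();  // completes once the master finishes the SnapshotProcedure
    }
  }
}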
2024-11-28T00:41:31,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-11-28T00:41:31,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-11-28T00:41:31,278 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:31,278 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:31,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-28T00:41:31,281 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:41:31,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3 in 237 msec 2024-11-28T00:41:31,282 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:41:31,283 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
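A few entries below, ExportSnapshot copies snaptb0-testExportWithTargetName to an HDFS export destination under the target name testExportWithTargetName. A hedged sketch of driving the same tool programmatically follows; the destination URI is copied from the log, while the mapper count and the exact command-line form the test uses are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithTargetName {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the ExportSnapshot MapReduce tool, renaming the snapshot at the destination.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",
        "-copy-to", "hdfs://localhost:44423/user/jenkins/test-data/"
            + "d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349",
        "-mappers", "2"  // assumed; the log does not show the mapper count at this point
    });
    System.exit(exitCode);
  }
}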
2024-11-28T00:41:31,283 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:41:31,283 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:31,284 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0] hfiles 2024-11-28T00:41:31,284 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:31,284 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:31,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:41:31,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:41:31,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:41:31,296 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:41:31,296 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-28T00:41:31,297 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-28T00:41:31,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:41:31,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:41:31,310 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:41:31,314 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:41:31,322 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:41:31,323 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-28T00:41:31,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:41:31,325 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-28T00:41:31,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 296 msec 2024-11-28T00:41:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:41:31,348 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:41:31,349 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349 2024-11-28T00:41:31,349 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:31,382 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:31,382 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-28T00:41:31,384 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:41:31,390 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-28T00:41:31,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:41:31,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:41:31,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:41:31,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:41:31,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:41:31,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:41:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:41:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:41:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:41:31,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:31,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:31,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-3904832828394403026.jar 2024-11-28T00:41:32,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-12678465823607485345.jar 2024-11-28T00:41:32,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:32,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:41:32,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:41:32,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:41:32,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:41:32,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:41:32,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:41:32,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:41:32,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:41:32,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:41:32,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:41:32,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:41:32,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:32,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:32,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:32,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:32,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:32,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:32,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:32,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741931_1107 (size=24020) 2024-11-28T00:41:32,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741931_1107 (size=24020) 2024-11-28T00:41:32,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741931_1107 (size=24020) 2024-11-28T00:41:32,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741932_1108 (size=77755) 2024-11-28T00:41:32,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741932_1108 (size=77755) 2024-11-28T00:41:32,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741932_1108 (size=77755) 2024-11-28T00:41:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741933_1109 (size=131360) 2024-11-28T00:41:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741933_1109 (size=131360) 2024-11-28T00:41:32,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741933_1109 (size=131360) 2024-11-28T00:41:32,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741934_1110 (size=111793) 2024-11-28T00:41:32,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741934_1110 (size=111793) 2024-11-28T00:41:32,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741934_1110 (size=111793) 
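
[Editor's note, a sketch only] The mapreduce.TableMapReduceUtil lines above record dependency-jar resolution for the export job: for each required class, the jar that ships it is located and added to the job's classpath, and the addStoredBlock entries that follow are most likely those jars being written into the job staging directory with replication 3. A minimal client-side sketch of the call that produces this logging; the class and job names below are made up for illustration and nothing here is taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddDependencyJarsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative job name; in the test this is the ExportSnapshot job.
            Job job = Job.getInstance(conf, "export-snapshot-sketch");
            // Locate the jar providing each class the job needs (HBase modules,
            // ZooKeeper, shaded protobuf/netty, metrics, Hadoop I/O classes, ...)
            // and add it to the job's tmpjars -- the source of the
            // "For class X, using jar Y" DEBUG lines seen above.
            TableMapReduceUtil.addDependencyJars(job);
        }
    }
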
2024-11-28T00:41:32,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741935_1111 (size=1832290) 2024-11-28T00:41:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741935_1111 (size=1832290) 2024-11-28T00:41:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741935_1111 (size=1832290) 2024-11-28T00:41:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741936_1112 (size=8360005) 2024-11-28T00:41:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741936_1112 (size=8360005) 2024-11-28T00:41:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741936_1112 (size=8360005) 2024-11-28T00:41:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741937_1113 (size=503880) 2024-11-28T00:41:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741937_1113 (size=503880) 2024-11-28T00:41:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741937_1113 (size=503880) 2024-11-28T00:41:32,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741938_1114 (size=6424743) 2024-11-28T00:41:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741938_1114 (size=6424743) 2024-11-28T00:41:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741938_1114 (size=6424743) 2024-11-28T00:41:32,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741939_1115 (size=322274) 2024-11-28T00:41:32,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741939_1115 (size=322274) 2024-11-28T00:41:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741939_1115 (size=322274) 2024-11-28T00:41:32,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741940_1116 (size=20406) 2024-11-28T00:41:32,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741940_1116 (size=20406) 2024-11-28T00:41:32,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741940_1116 (size=20406) 2024-11-28T00:41:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741941_1117 (size=45609) 2024-11-28T00:41:32,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741941_1117 
(size=45609) 2024-11-28T00:41:32,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741941_1117 (size=45609) 2024-11-28T00:41:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741942_1118 (size=136454) 2024-11-28T00:41:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741942_1118 (size=136454) 2024-11-28T00:41:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741942_1118 (size=136454) 2024-11-28T00:41:32,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741943_1119 (size=1597136) 2024-11-28T00:41:32,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741943_1119 (size=1597136) 2024-11-28T00:41:32,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741943_1119 (size=1597136) 2024-11-28T00:41:32,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741944_1120 (size=30873) 2024-11-28T00:41:32,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741944_1120 (size=30873) 2024-11-28T00:41:32,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741944_1120 (size=30873) 2024-11-28T00:41:32,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741945_1121 (size=29229) 2024-11-28T00:41:32,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741945_1121 (size=29229) 2024-11-28T00:41:32,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741945_1121 (size=29229) 2024-11-28T00:41:32,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741946_1122 (size=903861) 2024-11-28T00:41:32,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741946_1122 (size=903861) 2024-11-28T00:41:32,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741946_1122 (size=903861) 2024-11-28T00:41:32,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741947_1123 (size=5175431) 2024-11-28T00:41:32,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741947_1123 (size=5175431) 2024-11-28T00:41:32,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741947_1123 (size=5175431) 2024-11-28T00:41:32,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to 
blk_1073741948_1124 (size=232881) 2024-11-28T00:41:32,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741948_1124 (size=232881) 2024-11-28T00:41:32,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741948_1124 (size=232881) 2024-11-28T00:41:32,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741949_1125 (size=1323991) 2024-11-28T00:41:32,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741949_1125 (size=1323991) 2024-11-28T00:41:32,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741949_1125 (size=1323991) 2024-11-28T00:41:32,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741950_1126 (size=4695811) 2024-11-28T00:41:32,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741950_1126 (size=4695811) 2024-11-28T00:41:32,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741950_1126 (size=4695811) 2024-11-28T00:41:32,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741951_1127 (size=1877034) 2024-11-28T00:41:32,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741951_1127 (size=1877034) 2024-11-28T00:41:32,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741951_1127 (size=1877034) 2024-11-28T00:41:32,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741952_1128 (size=440956) 2024-11-28T00:41:32,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741952_1128 (size=440956) 2024-11-28T00:41:32,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741952_1128 (size=440956) 2024-11-28T00:41:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741953_1129 (size=217555) 2024-11-28T00:41:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741953_1129 (size=217555) 2024-11-28T00:41:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741953_1129 (size=217555) 2024-11-28T00:41:32,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741954_1130 (size=4188619) 2024-11-28T00:41:32,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741954_1130 (size=4188619) 2024-11-28T00:41:32,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46245 is added to blk_1073741954_1130 (size=4188619) 2024-11-28T00:41:32,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741955_1131 (size=127628) 2024-11-28T00:41:32,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741955_1131 (size=127628) 2024-11-28T00:41:32,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741955_1131 (size=127628) 2024-11-28T00:41:32,816 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:41:32,819 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-28T00:41:32,822 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.6 K 2024-11-28T00:41:32,822 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:41:32,822 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-11-28T00:41:32,822 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:41:32,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:41:32,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:41:32,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:41:32,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:41:32,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:41:32,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:41:32,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741958_1134 (size=304082) 2024-11-28T00:41:32,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741958_1134 (size=304082) 2024-11-28T00:41:32,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741958_1134 (size=304082) 2024-11-28T00:41:32,915 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:41:32,915 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:41:32,918 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0001_000001 (auth:SIMPLE) from 127.0.0.1:59376 2024-11-28T00:41:32,926 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000001/launch_container.sh] 2024-11-28T00:41:32,926 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000001/container_tokens] 2024-11-28T00:41:32,926 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0001/container_1732754375741_0001_01_000001/sysfs] 2024-11-28T00:41:33,734 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:48596 2024-11-28T00:41:34,093 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ed0afa718b8fe519f06c5ee2df5fd2d3 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:41:34,093 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f7dd65dc1d931624811654da1d7eeaa0 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:41:34,572 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:41:38,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-28T00:41:38,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-28T00:41:38,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:38,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:41:38,635 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:44586 2024-11-28T00:41:38,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to 
blk_1073741959_1135 (size=349780) 2024-11-28T00:41:38,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741959_1135 (size=349780) 2024-11-28T00:41:38,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741959_1135 (size=349780) 2024-11-28T00:41:40,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:59386 2024-11-28T00:41:40,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:48612 2024-11-28T00:41:41,750 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:48620 2024-11-28T00:41:41,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:59402 2024-11-28T00:41:43,920 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0002_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:41:43,947 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:41:46,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741960_1136 (size=14949) 2024-11-28T00:41:46,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741960_1136 (size=14949) 2024-11-28T00:41:46,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741960_1136 (size=14949) 2024-11-28T00:41:47,043 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000002/launch_container.sh] 2024-11-28T00:41:47,044 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000002/container_tokens] 2024-11-28T00:41:47,044 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000002/sysfs] 2024-11-28T00:41:49,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741962_1138 (size=5914) 2024-11-28T00:41:49,137 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741962_1138 (size=5914) 2024-11-28T00:41:49,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741962_1138 (size=5914) 2024-11-28T00:41:49,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741963_1139 (size=5102) 2024-11-28T00:41:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741963_1139 (size=5102) 2024-11-28T00:41:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741963_1139 (size=5102) 2024-11-28T00:41:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741964_1140 (size=8171) 2024-11-28T00:41:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741964_1140 (size=8171) 2024-11-28T00:41:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741964_1140 (size=8171) 2024-11-28T00:41:49,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741961_1137 (size=31744) 2024-11-28T00:41:49,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741961_1137 (size=31744) 2024-11-28T00:41:49,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741961_1137 (size=31744) 2024-11-28T00:41:49,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:41:49,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:41:49,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:41:49,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741966_1142 (size=31744) 2024-11-28T00:41:49,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741966_1142 (size=31744) 2024-11-28T00:41:49,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741966_1142 (size=31744) 2024-11-28T00:41:49,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741967_1143 (size=349780) 2024-11-28T00:41:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741967_1143 (size=349780) 2024-11-28T00:41:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741967_1143 (size=349780) 2024-11-28T00:41:49,877 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:54236 2024-11-28T00:41:49,885 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:54792 2024-11-28T00:41:49,890 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0002_01_000003 is : 143 2024-11-28T00:41:49,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:54252 2024-11-28T00:41:49,902 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000003/launch_container.sh] 2024-11-28T00:41:49,902 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000003/container_tokens] 2024-11-28T00:41:49,902 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000003/sysfs] 2024-11-28T00:41:49,904 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0002_01_000004 is : 143 2024-11-28T00:41:49,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000004/launch_container.sh] 2024-11-28T00:41:49,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000004/container_tokens] 2024-11-28T00:41:49,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000004/sysfs] 2024-11-28T00:41:49,912 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0002_01_000005 is : 143 
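
[Editor's note, a sketch only] The containers finishing above with exit code 143 (SIGTERM, typically the normal teardown of completed map tasks) belong to the snapshot export job itself. A sketch of an equivalent standalone invocation of the ExportSnapshot tool follows; the snapshot and target names are the ones used in this run, while the destination URI, mapper count, and class name are placeholders. Because skipTmp=false in the log, the export is first written under .hbase-snapshot/.tmp at the destination and moved into place during the "Finalize the Snapshot Export" step that appears below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // -target renames the snapshot at the destination, which is why the
            // export completes as "testExportWithTargetName" while the source
            // snapshot is "snaptb0-testExportWithTargetName".
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithTargetName",
                "-copy-to", "hdfs://namenode:8020/export-test",  // placeholder destination
                "-target", "testExportWithTargetName",
                "-mappers", "4"                                  // placeholder parallelism
            });
            System.exit(rc);
        }
    }
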
2024-11-28T00:41:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000005/launch_container.sh] 2024-11-28T00:41:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000005/container_tokens] 2024-11-28T00:41:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000005/sysfs] 2024-11-28T00:41:51,271 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:41:51,288 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:41:51,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-11-28T00:41:51,306 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:41:51,307 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:41:51,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-28T00:41:51,313 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-28T00:41:51,313 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-28T00:41:51,313 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/testExportWithTargetName 2024-11-28T00:41:51,314 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-28T00:41:51,314 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754491349/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-28T00:41:51,327 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithTargetName 2024-11-28T00:41:51,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:41:51,333 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754511333"}]},"ts":"1732754511333"} 2024-11-28T00:41:51,337 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-28T00:41:51,337 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-28T00:41:51,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-28T00:41:51,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, UNASSIGN}] 2024-11-28T00:41:51,343 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, UNASSIGN 2024-11-28T00:41:51,343 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, UNASSIGN 2024-11-28T00:41:51,345 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=f7dd65dc1d931624811654da1d7eeaa0, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:51,345 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=ed0afa718b8fe519f06c5ee2df5fd2d3, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:51,350 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, 
location=97f25515df69,34347,1732754369169, table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-28T00:41:51,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, UNASSIGN because future has completed 2024-11-28T00:41:51,351 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:51,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:41:51,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, UNASSIGN because future has completed 2024-11-28T00:41:51,352 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:41:51,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:41:51,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:41:51,505 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:51,505 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:51,505 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing f7dd65dc1d931624811654da1d7eeaa0, disabling compactions & flushes 2024-11-28T00:41:51,506 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:51,506 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:51,506 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
after waiting 0 ms 2024-11-28T00:41:51,506 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 2024-11-28T00:41:51,507 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:51,508 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:41:51,508 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing ed0afa718b8fe519f06c5ee2df5fd2d3, disabling compactions & flushes 2024-11-28T00:41:51,508 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:51,508 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:51,508 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. after waiting 0 ms 2024-11-28T00:41:51,508 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:51,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:41:51,522 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:51,522 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0. 
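
[Editor's note, a sketch only] The region closes above and the DeleteTableProcedure entries further below are the server-side halves of a plain disable-then-delete issued by the test client against the exported table. A minimal Admin-API sketch of that call sequence; the connection setup is a placeholder and the class name is invented.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportedTableSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testtb-testExportWithTargetName");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Drives DisableTableProcedure: regions are unassigned and closed,
                // as in the UNASSIGN / CloseRegionProcedure entries above.
                admin.disableTable(table);
                // Drives DeleteTableProcedure: HFiles are archived and the table's
                // rows are removed from hbase:meta, as logged below.
                admin.deleteTable(table);
            }
        }
    }
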
2024-11-28T00:41:51,522 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for f7dd65dc1d931624811654da1d7eeaa0: Waiting for close lock at 1732754511505Running coprocessor pre-close hooks at 1732754511505Disabling compacts and flushes for region at 1732754511505Disabling writes for close at 1732754511506 (+1 ms)Writing region close event to WAL at 1732754511513 (+7 ms)Running coprocessor post-close hooks at 1732754511521 (+8 ms)Closed at 1732754511522 (+1 ms) 2024-11-28T00:41:51,526 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:51,526 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:41:51,527 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:41:51,527 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3. 2024-11-28T00:41:51,528 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=f7dd65dc1d931624811654da1d7eeaa0, regionState=CLOSED 2024-11-28T00:41:51,528 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for ed0afa718b8fe519f06c5ee2df5fd2d3: Waiting for close lock at 1732754511508Running coprocessor pre-close hooks at 1732754511508Disabling compacts and flushes for region at 1732754511508Disabling writes for close at 1732754511508Writing region close event to WAL at 1732754511516 (+8 ms)Running coprocessor post-close hooks at 1732754511527 (+11 ms)Closed at 1732754511527 2024-11-28T00:41:51,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:51,531 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:51,532 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=ed0afa718b8fe519f06c5ee2df5fd2d3, regionState=CLOSED 2024-11-28T00:41:51,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:41:51,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-28T00:41:51,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; 
CloseRegionProcedure f7dd65dc1d931624811654da1d7eeaa0, server=97f25515df69,38585,1732754368938 in 186 msec 2024-11-28T00:41:51,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-28T00:41:51,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7dd65dc1d931624811654da1d7eeaa0, UNASSIGN in 198 msec 2024-11-28T00:41:51,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure ed0afa718b8fe519f06c5ee2df5fd2d3, server=97f25515df69,34347,1732754369169 in 185 msec 2024-11-28T00:41:51,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-11-28T00:41:51,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed0afa718b8fe519f06c5ee2df5fd2d3, UNASSIGN in 199 msec 2024-11-28T00:41:51,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-11-28T00:41:51,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 207 msec 2024-11-28T00:41:51,549 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754511549"}]},"ts":"1732754511549"} 2024-11-28T00:41:51,552 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-28T00:41:51,552 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-28T00:41:51,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 227 msec 2024-11-28T00:41:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:41:51,648 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:41:51,649 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithTargetName 2024-11-28T00:41:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,652 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-28T00:41:51,660 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,666 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-28T00:41:51,672 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:51,679 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/recovered.edits] 2024-11-28T00:41:51,684 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:51,689 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/recovered.edits] 2024-11-28T00:41:51,689 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/cf/f9c682b572d14d0fbfad57ae80c88d89 2024-11-28T00:41:51,694 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0/recovered.edits/9.seqid 2024-11-28T00:41:51,695 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:51,696 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418 to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/cf/34e111a4619b4bc39650587d08103418 2024-11-28T00:41:51,701 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3/recovered.edits/9.seqid 2024-11-28T00:41:51,702 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithTargetName/ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:51,702 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-28T00:41:51,703 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-28T00:41:51,704 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-28T00:41:51,710 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411280b96ec872f8a4ad08ab77dcb1944a649_ed0afa718b8fe519f06c5ee2df5fd2d3 2024-11-28T00:41:51,712 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241128d90da721406e4a079a6afc43e4bf865e_f7dd65dc1d931624811654da1d7eeaa0 2024-11-28T00:41:51,713 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-28T00:41:51,719 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,723 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from 
hbase:meta 2024-11-28T00:41:51,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,844 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,846 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:41:51,846 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:41:51,847 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:41:51,847 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-28T00:41:51,850 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,850 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
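The ZKWatcher entries above show the master and every region server observing changes to the table's znode under /hbase/acl, with ZKPermissionWatcher refreshing its permissions cache in response (the NodeDeleted/NodeChildrenChanged events follow a little further down). A minimal sketch of how such a watch behaves with the plain ZooKeeper client, assuming only the quorum address taken from the log; the class name, timeouts and output are illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class AclZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and base path are taken from the log lines above; everything else is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56850", 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("ZK event " + event.getType() + " on " + event.getPath());
        // One-shot child watch: a NodeChildrenChanged event like those logged here fires
        // when a per-table znode under /hbase/acl is created or deleted.
        zk.getChildren("/hbase/acl", watcher);
        Thread.sleep(60_000); // keep the process alive long enough to observe an event
        zk.close();
      }
    }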
2024-11-28T00:41:51,851 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754511850"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:51,851 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754511850"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:51,854 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:41:51,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:51,855 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-28T00:41:51,855 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-28T00:41:51,856 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:41:51,856 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f7dd65dc1d931624811654da1d7eeaa0, NAME => 'testtb-testExportWithTargetName,,1732754489973.f7dd65dc1d931624811654da1d7eeaa0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ed0afa718b8fe519f06c5ee2df5fd2d3, NAME => 'testtb-testExportWithTargetName,1,1732754489973.ed0afa718b8fe519f06c5ee2df5fd2d3.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:41:51,856 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
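The pid=62 DeleteTableProcedure entries above archive the region directories (including the MOB region under mobdir), delete the table's rows and state from hbase:meta, drop its descriptor and remove the ACL znode. A minimal sketch of the client call that triggers this path; the class name and connection boilerplate are assumptions, not the test's actual code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // deleteTable requires the table to be disabled first, which it is at this point
          // in the log; the master then runs the DeleteTableProcedure seen above.
          if (admin.tableExists(table) && admin.isTableDisabled(table)) {
            admin.deleteTable(table);
          }
        }
      }
    }

The snapshot deletions logged shortly after this (emptySnaptb0-testExportWithTargetName and snaptb0-testExportWithTargetName) correspond to the separate Admin#deleteSnapshot(String) call.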
2024-11-28T00:41:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-28T00:41:51,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754511856"}]},"ts":"9223372036854775807"} 2024-11-28T00:41:51,860 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-28T00:41:51,861 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:41:51,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 212 msec 2024-11-28T00:41:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-28T00:41:51,969 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-28T00:41:51,969 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:41:51,995 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-28T00:41:51,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-28T00:41:52,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-28T00:41:52,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-28T00:41:52,072 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=790 (was 756) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43191 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35415 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:35749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:35749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ContainersLauncher #2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-478372247_1 at /127.0.0.1:55298 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:55310 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2085 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:43191 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-478372247_1 at /127.0.0.1:50282 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:44672 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/97f25515df69:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125222) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:39220 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 806) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=610 (was 529) - SystemLoadAverage LEAK? -, ProcessCount=26 (was 20) - ProcessCount LEAK? -, AvailableMemoryMB=2795 (was 1299) - AvailableMemoryMB LEAK? 
- 2024-11-28T00:41:52,073 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-28T00:41:52,108 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=790, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=610, ProcessCount=26, AvailableMemoryMB=2789 2024-11-28T00:41:52,108 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-28T00:41:52,110 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:41:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:41:52,115 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:41:52,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-11-28T00:41:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:41:52,119 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:41:52,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:41:52,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:41:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:41:52,157 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cb80a9dac6c2e528157cc1200bb2f63a, NAME => 'testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:52,159 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4dca358a081c535cc775fd96f8b16a8c, NAME => 'testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:41:52,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:41:52,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing cb80a9dac6c2e528157cc1200bb2f63a, disabling compactions & flushes 2024-11-28T00:41:52,212 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. after waiting 0 ms 2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:52,212 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
2024-11-28T00:41:52,212 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for cb80a9dac6c2e528157cc1200bb2f63a: Waiting for close lock at 1732754512212Disabling compacts and flushes for region at 1732754512212Disabling writes for close at 1732754512212Writing region close event to WAL at 1732754512212Closed at 1732754512212 2024-11-28T00:41:52,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:41:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:41:52,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:41:52,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:41:52,229 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:52,229 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 4dca358a081c535cc775fd96f8b16a8c, disabling compactions & flushes 2024-11-28T00:41:52,229 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:52,229 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:52,229 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. after waiting 0 ms 2024-11-28T00:41:52,230 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:52,230 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 
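The CreateTableProcedure entries above create testtb-testExportWithResetTtl with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and a split at row key '1', which is why two regions ('' to '1' and '1' to '') are being initialized. A minimal sketch of building such a descriptor with the public client API; the table and family names come from the log, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB file
              .setMaxVersions(1)
              .build();
          TableDescriptor td = TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf).build();
          // One split key at '1' yields the two regions seen being initialized above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }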
2024-11-28T00:41:52,230 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4dca358a081c535cc775fd96f8b16a8c: Waiting for close lock at 1732754512229Disabling compacts and flushes for region at 1732754512229Disabling writes for close at 1732754512229Writing region close event to WAL at 1732754512230 (+1 ms)Closed at 1732754512230 2024-11-28T00:41:52,231 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:41:52,232 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754512231"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754512231"}]},"ts":"1732754512231"} 2024-11-28T00:41:52,232 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754512231"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754512231"}]},"ts":"1732754512231"} 2024-11-28T00:41:52,236 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:41:52,237 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:41:52,237 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754512237"}]},"ts":"1732754512237"} 2024-11-28T00:41:52,240 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-28T00:41:52,240 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:41:52,242 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:41:52,242 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:41:52,242 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:41:52,242 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:41:52,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, ASSIGN}] 2024-11-28T00:41:52,244 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, ASSIGN 2024-11-28T00:41:52,244 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, ASSIGN 2024-11-28T00:41:52,245 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:41:52,245 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:41:52,395 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:41:52,396 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=4dca358a081c535cc775fd96f8b16a8c, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:41:52,396 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=cb80a9dac6c2e528157cc1200bb2f63a, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, ASSIGN because future has completed 2024-11-28T00:41:52,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:41:52,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, ASSIGN because future has completed 2024-11-28T00:41:52,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:41:52,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:41:52,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:41:52,562 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:52,562 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => cb80a9dac6c2e528157cc1200bb2f63a, NAME => 'testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:41:52,563 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. service=AccessControlService 2024-11-28T00:41:52,563 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:41:52,563 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,563 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:52,564 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,564 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,574 INFO [StoreOpener-cb80a9dac6c2e528157cc1200bb2f63a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,581 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:52,581 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 4dca358a081c535cc775fd96f8b16a8c, NAME => 'testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:41:52,582 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. service=AccessControlService 2024-11-28T00:41:52,582 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:41:52,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:52,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,588 INFO [StoreOpener-cb80a9dac6c2e528157cc1200bb2f63a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cb80a9dac6c2e528157cc1200bb2f63a columnFamilyName cf 2024-11-28T00:41:52,590 DEBUG [StoreOpener-cb80a9dac6c2e528157cc1200bb2f63a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:52,591 INFO [StoreOpener-cb80a9dac6c2e528157cc1200bb2f63a-1 {}] regionserver.HStore(327): Store=cb80a9dac6c2e528157cc1200bb2f63a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:52,592 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,593 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,594 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,594 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,594 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(1060): Cleaning up temporary data for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,597 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,599 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:52,600 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened cb80a9dac6c2e528157cc1200bb2f63a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68902626, jitterRate=0.02672913670539856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:52,600 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,601 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for cb80a9dac6c2e528157cc1200bb2f63a: Running coprocessor pre-open hook at 1732754512564Writing region info on filesystem at 1732754512564Initializing all the Stores at 1732754512565 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754512565Cleaning up temporary data from old regions at 1732754512594 (+29 ms)Running coprocessor post-open hooks at 1732754512600 (+6 ms)Region opened successfully at 1732754512601 (+1 ms) 2024-11-28T00:41:52,603 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a., pid=66, masterSystemTime=1732754512557 2024-11-28T00:41:52,606 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:52,606 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
2024-11-28T00:41:52,607 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=cb80a9dac6c2e528157cc1200bb2f63a, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:41:52,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:41:52,613 INFO [StoreOpener-4dca358a081c535cc775fd96f8b16a8c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-11-28T00:41:52,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938 in 212 msec 2024-11-28T00:41:52,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, ASSIGN in 376 msec 2024-11-28T00:41:52,626 INFO [StoreOpener-4dca358a081c535cc775fd96f8b16a8c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dca358a081c535cc775fd96f8b16a8c columnFamilyName cf 2024-11-28T00:41:52,627 DEBUG [StoreOpener-4dca358a081c535cc775fd96f8b16a8c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:52,628 INFO [StoreOpener-4dca358a081c535cc775fd96f8b16a8c-1 {}] regionserver.HStore(327): Store=4dca358a081c535cc775fd96f8b16a8c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:52,629 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,630 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,631 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): 
Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,631 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,631 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,634 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,650 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:52,654 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 4dca358a081c535cc775fd96f8b16a8c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71576627, jitterRate=0.06657485663890839}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:52,654 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,654 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 4dca358a081c535cc775fd96f8b16a8c: Running coprocessor pre-open hook at 1732754512583Writing region info on filesystem at 1732754512583Initializing all the Stores at 1732754512602 (+19 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754512603 (+1 ms)Cleaning up temporary data from old regions at 1732754512631 (+28 ms)Running coprocessor post-open hooks at 1732754512654 (+23 ms)Region opened successfully at 1732754512654 2024-11-28T00:41:52,656 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c., pid=67, masterSystemTime=1732754512559 2024-11-28T00:41:52,664 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:52,664 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 
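The two "Opened ... next sequenceid=2" entries above print desiredMaxFileSize=68902626 (jitterRate≈0.0267) and desiredMaxFileSize=71576627 (jitterRate≈0.0666); both values are roughly 67108864 × (1 + jitterRate), so they are consistent with a 64 MB hbase.hregion.max.filesize in the test configuration (an inference from the printed numbers, not stated explicitly in the log). A minimal sketch of setting such a value on a client/test Configuration, for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public final class SplitSizeConfigSketch {
        public static Configuration smallSplitConf() {
            Configuration conf = HBaseConfiguration.create();
            // 64 MB base region size; the split policy applies a random jitter on top,
            // which matches the desiredMaxFileSize values printed in the log above.
            conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024 * 1024);
            return conf;
        }
    }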
2024-11-28T00:41:52,665 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=4dca358a081c535cc775fd96f8b16a8c, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:41:52,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:41:52,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-11-28T00:41:52,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084 in 265 msec 2024-11-28T00:41:52,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-28T00:41:52,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, ASSIGN in 431 msec 2024-11-28T00:41:52,678 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:41:52,678 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754512678"}]},"ts":"1732754512678"} 2024-11-28T00:41:52,681 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-28T00:41:52,682 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:41:52,682 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-28T00:41:52,687 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:41:52,707 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:52,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:52,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:52,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:52,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:52,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:52,718 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:52,718 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:52,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 607 msec 2024-11-28T00:41:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:41:52,749 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:41:52,749 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:52,753 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-28T00:41:52,753 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
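The CreateTableProcedure above finishes ("Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed") with two regions split at row key '1' and a single MOB-enabled column family 'cf'. A minimal client-side sketch that would produce the same table layout (the actual test harness code is not shown in this log, so treat this as an illustrative reconstruction):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
                    // IS_MOB => 'true', MOB_THRESHOLD => '0' as shown in the region open journal above
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                        .setMobEnabled(true)
                        .setMobThreshold(0L)
                        .build())
                    .build();
                // One split key gives the two regions seen in the log: ['', '1') and ['1', '').
                admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
            }
        }
    }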
2024-11-28T00:41:52,754 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:52,756 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:52,765 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:52,774 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:52,779 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:41:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754512779 (current time:1732754512779). 2024-11-28T00:41:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:41:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:41:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:41:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20344894, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:52,782 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:52,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:52,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:52,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@568e0808, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:41:52,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:52,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:52,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,786 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54080, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:52,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5840cb69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:52,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:52,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:52,791 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46440, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:52,792 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:41:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,793 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5992ef97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:52,798 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:52,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:52,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:52,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51ddc7c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:52,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:52,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:52,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,800 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54094, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:52,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20aa7b76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:52,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:52,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:52,805 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46452, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:52,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:41:52,810 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:41:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:41:52,811 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
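The call stacks above bottom out in MasterRpcServices.snapshot(MasterRpcServices.java:1767), i.e. the master is validating a client snapshot RPC for { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }. A minimal sketch of the client call that issues such a request (illustrative; the exact call site in the test is not visible in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // type=FLUSH in the log corresponds to SnapshotType.FLUSH; ttl stays at its default (0).
                admin.snapshot(new SnapshotDescription(
                    "emptySnaptb0-testExportWithResetTtl",
                    TableName.valueOf("testtb-testExportWithResetTtl"),
                    SnapshotType.FLUSH));
            }
        }
    }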
2024-11-28T00:41:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:41:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-28T00:41:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:41:52,815 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:41:52,817 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:41:52,820 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:41:52,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:41:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:41:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:41:52,856 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:41:52,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c}] 2024-11-28T00:41:52,861 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:52,862 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:52,918 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:41:53,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:53,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for cb80a9dac6c2e528157cc1200bb2f63a: 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:53,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:41:53,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 4dca358a081c535cc775fd96f8b16a8c: 2024-11-28T00:41:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-28T00:41:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-28T00:41:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:41:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:41:53,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:41:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:41:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:41:53,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:53,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-11-28T00:41:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-11-28T00:41:53,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:53,139 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:53,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a in 284 msec 2024-11-28T00:41:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:41:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:41:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:41:53,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 
2024-11-28T00:41:53,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-28T00:41:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-11-28T00:41:53,162 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:53,162 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:53,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-11-28T00:41:53,170 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:41:53,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c in 308 msec 2024-11-28T00:41:53,174 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:41:53,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:41:53,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:41:53,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:53,177 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:41:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:41:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:41:53,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:41:53,218 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:41:53,218 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-28T00:41:53,220 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-28T00:41:53,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:41:53,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:41:53,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:41:53,263 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:41:53,271 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:41:53,271 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-28T00:41:53,273 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:41:53,273 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-28T00:41:53,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 461 msec 2024-11-28T00:41:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:41:53,438 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:41:53,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:53,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:53,463 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:53,473 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-28T00:41:53,474 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:53,474 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:53,477 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:53,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:53,523 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:53,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:41:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754513527 (current time:1732754513527). 
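For context, the snapshot request logged just above ({ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) is the server-side trace of an ordinary Admin-API call. A minimal client-side sketch, assuming the standard HBase 2.x/3.x client API (connection setup and the class name are illustrative, not taken from the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: regions are flushed first and their hfiles referenced,
          // matching the "type=FLUSH ttl=0" request in the log above.
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }

The lines that follow show the master validating that request (TTL and VERSION defaults, owner, then the ACL lookup via SnapshotDescriptionUtils) before driving SnapshotProcedure pid=71 through its SNAPSHOT_PREPARE, SNAPSHOT_WRITE_SNAPSHOT_INFO and SNAPSHOT_SNAPSHOT_ONLINE_REGIONS states.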
2024-11-28T00:41:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:41:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:41:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:41:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@440a326f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:53,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:53,530 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:53,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:53,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:53,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f32709e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:53,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:53,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:53,531 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54112, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:53,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2caffb42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:53,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1]
2024-11-28T00:41:53,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-28T00:41:53,535 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46462, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-28T00:41:53,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227.
2024-11-28T00:41:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-28T00:41:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:41:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:41:53,536 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-28T00:41:53,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20ad2ce2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:53,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:53,541 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:53,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:53,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:53,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42840b82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:53,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:53,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:53,543 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:53,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f860418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:53,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:53,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:53,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
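The repeated ClusterIdFetcher/ConnectionRegistry lines above come from short-lived client connections that the master opens internally while validating the snapshot request (see the SnapshotDescriptionUtils frames in the surrounding call stacks). From an application's point of view the same registry lookup happens once when a connection is created; a rough sketch, again assuming the standard client API (the printed id would match the '8ae27c25-...' cluster_id in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Creating the connection performs the registry lookup that produces the
          // "Got connection registry info: cluster_id" lines; the id is also exposed here.
          System.out.println(admin.getClusterMetrics().getClusterId());
        }
      }
    }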
2024-11-28T00:41:53,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2]
2024-11-28T00:41:53,553 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227.
2024-11-28T00:41:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-28T00:41:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:41:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:41:53,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227
{}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:41:53,554 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:41:53,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:41:53,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-28T00:41:53,557 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:41:53,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:41:53,560 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:41:53,564 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:41:53,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:41:53,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:41:53,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:41:53,606 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:41:53,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c}] 2024-11-28T00:41:53,607 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock 
for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:53,607 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:53,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:41:53,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-11-28T00:41:53,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-11-28T00:41:53,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:41:53,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:41:53,761 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing cb80a9dac6c2e528157cc1200bb2f63a 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-28T00:41:53,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 4dca358a081c535cc775fd96f8b16a8c 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-28T00:41:53,828 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c is 71, key is 1854eb8d5dade492957085dee93193bb/cf:q/1732754513459/Put/seqid=0 2024-11-28T00:41:53,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a is 71, key is 014a2715cffa76f53d91bef430928ddd/cf:q/1732754513450/Put/seqid=0 2024-11-28T00:41:53,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:41:53,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741978_1154 (size=5311) 2024-11-28T00:41:53,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741978_1154 (size=5311) 2024-11-28T00:41:53,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45777 is added to blk_1073741978_1154 (size=5311) 2024-11-28T00:41:53,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:53,925 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:53,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741977_1153 (size=7962) 2024-11-28T00:41:53,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741977_1153 (size=7962) 2024-11-28T00:41:53,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/.tmp/cf/2697cb5e324a4d6c9b7fb6068806b0fa, store: [table=testtb-testExportWithResetTtl family=cf region=cb80a9dac6c2e528157cc1200bb2f63a] 2024-11-28T00:41:53,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741977_1153 (size=7962) 2024-11-28T00:41:53,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/.tmp/cf/2697cb5e324a4d6c9b7fb6068806b0fa is 206, key is 049d6730b842dd50b7e7f3dd7ca433b93/cf:q/1732754513450/Put/seqid=0 2024-11-28T00:41:53,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:53,970 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:53,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/.tmp/cf/d61d31a7b43c4ecfa73a7cf444fe25b4, store: [table=testtb-testExportWithResetTtl family=cf region=4dca358a081c535cc775fd96f8b16a8c] 2024-11-28T00:41:53,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/.tmp/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 is 206, key is 12d06915b82be51c6b1d5e2002f95d062/cf:q/1732754513459/Put/seqid=0 2024-11-28T00:41:53,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741979_1155 (size=6510) 2024-11-28T00:41:53,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741979_1155 (size=6510) 2024-11-28T00:41:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741979_1155 (size=6510) 2024-11-28T00:41:53,991 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/.tmp/cf/2697cb5e324a4d6c9b7fb6068806b0fa 2024-11-28T00:41:54,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/.tmp/cf/2697cb5e324a4d6c9b7fb6068806b0fa as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa 2024-11-28T00:41:54,012 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa, entries=6, sequenceid=6, filesize=6.4 K 2024-11-28T00:41:54,013 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for cb80a9dac6c2e528157cc1200bb2f63a in 253ms, sequenceid=6, compaction requested=false 2024-11-28T00:41:54,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-28T00:41:54,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 
cb80a9dac6c2e528157cc1200bb2f63a: 2024-11-28T00:41:54,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. for snaptb0-testExportWithResetTtl completed. 2024-11-28T00:41:54,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:54,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa] hfiles 2024-11-28T00:41:54,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741980_1156 (size=14249) 2024-11-28T00:41:54,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741980_1156 (size=14249) 2024-11-28T00:41:54,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741980_1156 (size=14249) 2024-11-28T00:41:54,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/.tmp/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 2024-11-28T00:41:54,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:41:54,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:41:54,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:41:54,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
2024-11-28T00:41:54,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-28T00:41:54,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-11-28T00:41:54,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:54,037 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:54,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a in 433 msec 2024-11-28T00:41:54,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/.tmp/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 2024-11-28T00:41:54,052 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4, entries=44, sequenceid=6, filesize=13.9 K 2024-11-28T00:41:54,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 4dca358a081c535cc775fd96f8b16a8c in 293ms, sequenceid=6, compaction requested=false 2024-11-28T00:41:54,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 4dca358a081c535cc775fd96f8b16a8c: 2024-11-28T00:41:54,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. for snaptb0-testExportWithResetTtl completed. 2024-11-28T00:41:54,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:54,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4] hfiles 2024-11-28T00:41:54,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:41:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:41:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:41:54,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 
2024-11-28T00:41:54,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-11-28T00:41:54,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-11-28T00:41:54,067 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:54,068 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:54,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-28T00:41:54,070 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:41:54,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4dca358a081c535cc775fd96f8b16a8c in 463 msec 2024-11-28T00:41:54,072 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:41:54,073 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:41:54,073 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:41:54,073 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:54,074 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a] hfiles 2024-11-28T00:41:54,075 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:41:54,075 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:41:54,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:41:54,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:41:54,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:41:54,091 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:41:54,092 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,092 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:41:54,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:41:54,112 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:41:54,116 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:41:54,123 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:41:54,124 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-28T00:41:54,126 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:41:54,126 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-28T00:41:54,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 571 msec 2024-11-28T00:41:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:41:54,188 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:41:54,190 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:41:54,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-28T00:41:54,193 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:41:54,193 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-11-28T00:41:54,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:41:54,195 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:41:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:41:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:41:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:41:54,217 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6885cdc89d76dd248176a0e4cf7684f8, NAME => 'testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:54,217 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0b28daf4d4c5becdc3e9e69ec3afa3fa, NAME => 'testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:54,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:41:54,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:41:54,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:41:54,243 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:41:54,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:41:54,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 0b28daf4d4c5becdc3e9e69ec3afa3fa, disabling compactions & flushes 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 6885cdc89d76dd248176a0e4cf7684f8, disabling compactions & flushes 2024-11-28T00:41:54,243 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,243 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. after waiting 0 ms 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. after waiting 0 ms 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,243 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 
2024-11-28T00:41:54,243 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0b28daf4d4c5becdc3e9e69ec3afa3fa: Waiting for close lock at 1732754514243Disabling compacts and flushes for region at 1732754514243Disabling writes for close at 1732754514243Writing region close event to WAL at 1732754514243Closed at 1732754514243 2024-11-28T00:41:54,243 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6885cdc89d76dd248176a0e4cf7684f8: Waiting for close lock at 1732754514243Disabling compacts and flushes for region at 1732754514243Disabling writes for close at 1732754514243Writing region close event to WAL at 1732754514243Closed at 1732754514243 2024-11-28T00:41:54,245 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:41:54,245 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732754514245"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754514245"}]},"ts":"1732754514245"} 2024-11-28T00:41:54,245 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732754514245"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754514245"}]},"ts":"1732754514245"} 2024-11-28T00:41:54,248 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
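The 'testExportWithResetTtl' table being created here uses a MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW') and is pre-split at rowkey '1', which is why two regions (0b28daf4d4c5becdc3e9e69ec3afa3fa and 6885cdc89d76dd248176a0e4cf7684f8) are initialized and added to meta. A rough client-side equivalent of that DDL, as a sketch only (the builder calls shown are the standard ones; every other attribute is left at its default):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)               // IS_MOB => 'true'
                  .setMobThreshold(0L)               // MOB_THRESHOLD => '0': every cell goes to the MOB store
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .build())
              .build();
          // Pre-split at '1' so the table starts with the two regions seen in the log.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }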
2024-11-28T00:41:54,257 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:41:54,257 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754514257"}]},"ts":"1732754514257"} 2024-11-28T00:41:54,259 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-28T00:41:54,259 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:41:54,261 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:41:54,261 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:41:54,261 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:41:54,261 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:41:54,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, ASSIGN}] 2024-11-28T00:41:54,266 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, ASSIGN 2024-11-28T00:41:54,266 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, ASSIGN 2024-11-28T00:41:54,269 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:41:54,269 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:41:54,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:41:54,420 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:41:54,420 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=6885cdc89d76dd248176a0e4cf7684f8, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:54,420 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=0b28daf4d4c5becdc3e9e69ec3afa3fa, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:41:54,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, ASSIGN because future has completed 2024-11-28T00:41:54,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:41:54,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, ASSIGN because future has completed 2024-11-28T00:41:54,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:41:54,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:41:54,582 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,582 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 0b28daf4d4c5becdc3e9e69ec3afa3fa, NAME => 'testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:41:54,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. service=AccessControlService 2024-11-28T00:41:54,583 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:41:54,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,583 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:54,584 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,584 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,584 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,585 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 6885cdc89d76dd248176a0e4cf7684f8, NAME => 'testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:41:54,585 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. service=AccessControlService 2024-11-28T00:41:54,585 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:41:54,585 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,585 INFO [StoreOpener-0b28daf4d4c5becdc3e9e69ec3afa3fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,585 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:41:54,586 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,586 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,587 INFO [StoreOpener-0b28daf4d4c5becdc3e9e69ec3afa3fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0b28daf4d4c5becdc3e9e69ec3afa3fa columnFamilyName cf 2024-11-28T00:41:54,589 DEBUG [StoreOpener-0b28daf4d4c5becdc3e9e69ec3afa3fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:54,589 INFO [StoreOpener-6885cdc89d76dd248176a0e4cf7684f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,589 INFO [StoreOpener-0b28daf4d4c5becdc3e9e69ec3afa3fa-1 {}] regionserver.HStore(327): Store=0b28daf4d4c5becdc3e9e69ec3afa3fa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:54,590 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,590 INFO [StoreOpener-6885cdc89d76dd248176a0e4cf7684f8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6885cdc89d76dd248176a0e4cf7684f8 columnFamilyName cf 2024-11-28T00:41:54,591 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,591 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,591 DEBUG [StoreOpener-6885cdc89d76dd248176a0e4cf7684f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:54,592 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,592 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,593 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,594 INFO [StoreOpener-6885cdc89d76dd248176a0e4cf7684f8-1 {}] regionserver.HStore(327): Store=6885cdc89d76dd248176a0e4cf7684f8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:41:54,594 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,596 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,597 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,597 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:54,597 DEBUG 
[RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,597 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,597 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 0b28daf4d4c5becdc3e9e69ec3afa3fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72152049, jitterRate=0.07514931261539459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:54,598 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,598 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 0b28daf4d4c5becdc3e9e69ec3afa3fa: Running coprocessor pre-open hook at 1732754514584Writing region info on filesystem at 1732754514584Initializing all the Stores at 1732754514585 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754514585Cleaning up temporary data from old regions at 1732754514592 (+7 ms)Running coprocessor post-open hooks at 1732754514598 (+6 ms)Region opened successfully at 1732754514598 2024-11-28T00:41:54,599 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,600 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa., pid=78, masterSystemTime=1732754514576 2024-11-28T00:41:54,602 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:41:54,602 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,602 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 
2024-11-28T00:41:54,603 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=0b28daf4d4c5becdc3e9e69ec3afa3fa, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:41:54,603 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 6885cdc89d76dd248176a0e4cf7684f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64560209, jitterRate=-0.0379779189825058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:41:54,603 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,603 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 6885cdc89d76dd248176a0e4cf7684f8: Running coprocessor pre-open hook at 1732754514586Writing region info on filesystem at 1732754514586Initializing all the Stores at 1732754514587 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754514587Cleaning up temporary data from old regions at 1732754514597 (+10 ms)Running coprocessor post-open hooks at 1732754514603 (+6 ms)Region opened successfully at 1732754514603 2024-11-28T00:41:54,604 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8., pid=77, masterSystemTime=1732754514575 2024-11-28T00:41:54,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:41:54,608 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:54,608 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 
2024-11-28T00:41:54,611 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=6885cdc89d76dd248176a0e4cf7684f8, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:41:54,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-11-28T00:41:54,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084 in 182 msec 2024-11-28T00:41:54,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:41:54,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, ASSIGN in 352 msec 2024-11-28T00:41:54,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-28T00:41:54,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169 in 193 msec 2024-11-28T00:41:54,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-11-28T00:41:54,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, ASSIGN in 357 msec 2024-11-28T00:41:54,621 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:41:54,621 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754514621"}]},"ts":"1732754514621"} 2024-11-28T00:41:54,623 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-28T00:41:54,624 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:41:54,625 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-28T00:41:54,636 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:41:54,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:54,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, 
quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:54,696 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:54,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:41:54,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:41:54,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 555 msec 2024-11-28T00:41:54,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:41:54,818 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:41:54,818 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,821 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-28T00:41:54,822 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:54,822 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:54,824 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,828 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,832 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:54,846 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:41:54,848 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,851 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-28T00:41:54,851 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 
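The entries above record the client side of this test creating testExportWithResetTtl (a single 'cf' family, pre-split at '1' into the two regions assigned earlier) and then loading it with WAL-disabled puts ("writing data to region ... with WAL disabled"). As a rough illustration only, and not the test's actual source, the equivalent calls against the standard HBase Java client would look something like the sketch below; the row key, qualifier, and value are made-up placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One family 'cf', pre-split at "1" -> two regions ('' .. '1') and ('1' .. ''),
          // matching the two regions created and assigned in the log above.
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });

          // SKIP_WAL puts correspond to the "with WAL disabled" warnings
          // logged by the region servers; row/qualifier/value are placeholders.
          try (Table table = conn.getTable(tn)) {
            Put p = new Put(Bytes.toBytes("row-0"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
                .setDurability(Durability.SKIP_WAL);
            table.put(p);
          }
        }
      }
    }
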
2024-11-28T00:41:54,851 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:41:54,853 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,858 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,866 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:41:54,869 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-28T00:41:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754514869 (current time:1732754514869). 2024-11-28T00:41:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:41:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:41:54,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b15c571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:54,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:54,871 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:54,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:54,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:54,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5944c829, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:54,872 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:54,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,873 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:54,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61a28c8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:54,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:54,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:54,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46480, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:54,876 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:41:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,877 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1667e621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:41:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:41:54,879 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:41:54,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:41:54,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:41:54,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25502328, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:41:54,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:41:54,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,880 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54172, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:41:54,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@685aadda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:41:54,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:41:54,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:41:54,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:41:54,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46490, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:41:54,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:41:54,887 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:41:54,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:41:54,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:41:54,888 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:41:54,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:41:54,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
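At this point the master has validated the snapshot request { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } and is about to store the SnapshotProcedure. A client-side sketch of issuing such a request through the Admin API is shown below; it is illustrative only, and the ttl=100000 seen in the logged request is assumed to be supplied through snapshot properties in the test rather than through the two-argument call used here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot, matching
          // "type=FLUSH" in the request logged above.
          admin.snapshot("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }
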
2024-11-28T00:41:54,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-28T00:41:54,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-28T00:41:54,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:41:54,891 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:41:54,892 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:41:54,894 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:41:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:41:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:41:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:41:54,900 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:41:54,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8}] 2024-11-28T00:41:54,901 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:54,901 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:54,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-11-28T00:41:55,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-11-28T00:41:55,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-11-28T00:41:55,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:41:55,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:41:55,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 6885cdc89d76dd248176a0e4cf7684f8 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:41:55,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 0b28daf4d4c5becdc3e9e69ec3afa3fa 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:41:55,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa is 71, key is 097bfae9933f2ff3c653f5bc84ecaf9e/cf:q/1732754514842/Put/seqid=0 2024-11-28T00:41:55,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 is 71, key is 2e2ee2f28097737511771504a6aff79d/cf:q/1732754514845/Put/seqid=0 2024-11-28T00:41:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741989_1165 (size=5032) 2024-11-28T00:41:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741989_1165 (size=5032) 2024-11-28T00:41:55,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741989_1165 (size=5032) 2024-11-28T00:41:55,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:55,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741990_1166 (size=8242) 2024-11-28T00:41:55,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46245 is added to blk_1073741990_1166 (size=8242) 2024-11-28T00:41:55,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741990_1166 (size=8242) 2024-11-28T00:41:55,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:55,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:55,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:55,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/.tmp/cf/f9d4e02b2b65447e81c0dc6b00b35723, store: [table=testExportWithResetTtl family=cf region=6885cdc89d76dd248176a0e4cf7684f8] 2024-11-28T00:41:55,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/.tmp/cf/d614f665d0234489b13f5a75ded078cf, store: [table=testExportWithResetTtl family=cf region=0b28daf4d4c5becdc3e9e69ec3afa3fa] 2024-11-28T00:41:55,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/.tmp/cf/f9d4e02b2b65447e81c0dc6b00b35723 is 199, key is 10fe1a8379d6f49c45b791ed2d8e4be2f/cf:q/1732754514845/Put/seqid=0 2024-11-28T00:41:55,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/.tmp/cf/d614f665d0234489b13f5a75ded078cf is 199, key is 01668fc053b7bd4273bdd6b2e9bd4f869/cf:q/1732754514842/Put/seqid=0 2024-11-28T00:41:55,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741991_1167 (size=14714) 2024-11-28T00:41:55,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741991_1167 (size=14714) 2024-11-28T00:41:55,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741991_1167 (size=14714) 2024-11-28T00:41:55,097 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/.tmp/cf/f9d4e02b2b65447e81c0dc6b00b35723 2024-11-28T00:41:55,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/.tmp/cf/f9d4e02b2b65447e81c0dc6b00b35723 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723 2024-11-28T00:41:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741992_1168 (size=5679) 2024-11-28T00:41:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741992_1168 (size=5679) 2024-11-28T00:41:55,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741992_1168 (size=5679) 2024-11-28T00:41:55,110 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/.tmp/cf/d614f665d0234489b13f5a75ded078cf 2024-11-28T00:41:55,110 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723, entries=48, sequenceid=5, filesize=14.4 K 2024-11-28T00:41:55,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6885cdc89d76dd248176a0e4cf7684f8 in 58ms, sequenceid=5, compaction requested=false 2024-11-28T00:41:55,112 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 6885cdc89d76dd248176a0e4cf7684f8: 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. for snaptb-testExportWithResetTtl completed. 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723] hfiles 2024-11-28T00:41:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723 for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:41:55,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/.tmp/cf/d614f665d0234489b13f5a75ded078cf as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf 2024-11-28T00:41:55,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:41:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:41:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:41:55,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 
2024-11-28T00:41:55,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-11-28T00:41:55,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-11-28T00:41:55,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:55,123 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:55,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf, entries=2, sequenceid=5, filesize=5.5 K 2024-11-28T00:41:55,125 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 0b28daf4d4c5becdc3e9e69ec3afa3fa in 72ms, sequenceid=5, compaction requested=false 2024-11-28T00:41:55,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 0b28daf4d4c5becdc3e9e69ec3afa3fa: 2024-11-28T00:41:55,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. for snaptb-testExportWithResetTtl completed. 2024-11-28T00:41:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:41:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:41:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf] hfiles 2024-11-28T00:41:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:41:55,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8 in 225 msec 2024-11-28T00:41:55,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:41:55,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:41:55,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:41:55,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 
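[Editor's note] The entries above show both SnapshotRegionProcedure children (pid=80 and pid=81) flushing their regions and writing region manifests for snaptb-testExportWithResetTtl. For orientation only: a client normally drives this kind of FLUSH snapshot through the HBase Admin API. The sketch below is a minimal illustration using the table and snapshot names from this log; it is not the test's actual code, and it omits the ttl=100000 seen in the procedure description, which would have to be supplied through the snapshot's properties.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Names taken from the log above. A flush snapshot forces a memstore flush
      // of every region (the "Flushing ..." HRegion entries) before each region's
      // manifest and hfile references are written.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
      // snapshot() blocks until the master-side SnapshotProcedure (pid=79 here)
      // finishes; the repeated "Checking to see if procedure is done" entries are
      // that client-side completion polling.
    }
  }
}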
2024-11-28T00:41:55,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-28T00:41:55,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-11-28T00:41:55,135 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:55,135 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:55,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-11-28T00:41:55,137 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:41:55,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa in 236 msec 2024-11-28T00:41:55,138 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:41:55,139 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:41:55,139 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:41:55,139 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:41:55,140 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa] hfiles 2024-11-28T00:41:55,140 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:41:55,140 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:41:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:41:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:41:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:41:55,149 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:41:55,150 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-28T00:41:55,150 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:41:55,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:41:55,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:41:55,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38327 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:41:55,169 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:41:55,176 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:41:55,177 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:41:55,178 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:41:55,178 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-28T00:41:55,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 289 msec 2024-11-28T00:41:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:41:55,209 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:41:55,220 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220 2024-11-28T00:41:55,220 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:55,256 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:41:55,256 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:41:55,257 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:41:55,262 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:41:55,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:41:55,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:41:55,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:41:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:41:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:41:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:41:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:41:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:41:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:41:55,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:55,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:55,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:55,963 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0002_000001 (auth:SIMPLE) from 127.0.0.1:54692 2024-11-28T00:41:55,972 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000001/launch_container.sh] 2024-11-28T00:41:55,972 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000001/container_tokens] 2024-11-28T00:41:55,972 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0002/container_1732754375741_0002_01_000001/sysfs] 2024-11-28T00:41:56,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-18299269007357328678.jar 2024-11-28T00:41:56,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-229287441077166821.jar 2024-11-28T00:41:56,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:41:56,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:41:56,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:41:56,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:41:56,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:41:56,269 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:41:56,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:41:56,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:56,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:56,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:56,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:56,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:41:56,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:56,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:41:56,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742000_1176 (size=24020) 2024-11-28T00:41:56,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742000_1176 (size=24020) 2024-11-28T00:41:56,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742000_1176 (size=24020) 2024-11-28T00:41:56,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742001_1177 (size=77755) 2024-11-28T00:41:56,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45777 is added to blk_1073742001_1177 (size=77755) 2024-11-28T00:41:56,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742001_1177 (size=77755) 2024-11-28T00:41:56,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742002_1178 (size=131360) 2024-11-28T00:41:56,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742002_1178 (size=131360) 2024-11-28T00:41:56,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742002_1178 (size=131360) 2024-11-28T00:41:56,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742003_1179 (size=111793) 2024-11-28T00:41:56,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742003_1179 (size=111793) 2024-11-28T00:41:56,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742003_1179 (size=111793) 2024-11-28T00:41:56,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742004_1180 (size=1832290) 2024-11-28T00:41:56,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742004_1180 (size=1832290) 2024-11-28T00:41:56,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742004_1180 (size=1832290) 2024-11-28T00:41:56,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742005_1181 (size=8360005) 2024-11-28T00:41:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742005_1181 (size=8360005) 2024-11-28T00:41:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742005_1181 (size=8360005) 2024-11-28T00:41:56,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742006_1182 (size=503880) 2024-11-28T00:41:56,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742006_1182 (size=503880) 2024-11-28T00:41:56,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742006_1182 (size=503880) 2024-11-28T00:41:56,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742007_1183 (size=322274) 2024-11-28T00:41:56,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742007_1183 (size=322274) 2024-11-28T00:41:56,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742007_1183 (size=322274) 2024-11-28T00:41:56,413 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742008_1184 (size=20406) 2024-11-28T00:41:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742008_1184 (size=20406) 2024-11-28T00:41:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742008_1184 (size=20406) 2024-11-28T00:41:56,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742009_1185 (size=45609) 2024-11-28T00:41:56,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742009_1185 (size=45609) 2024-11-28T00:41:56,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742009_1185 (size=45609) 2024-11-28T00:41:56,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742010_1186 (size=136454) 2024-11-28T00:41:56,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742010_1186 (size=136454) 2024-11-28T00:41:56,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742010_1186 (size=136454) 2024-11-28T00:41:56,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742011_1187 (size=1597136) 2024-11-28T00:41:56,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742011_1187 (size=1597136) 2024-11-28T00:41:56,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742011_1187 (size=1597136) 2024-11-28T00:41:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742012_1188 (size=30873) 2024-11-28T00:41:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742012_1188 (size=30873) 2024-11-28T00:41:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742012_1188 (size=30873) 2024-11-28T00:41:56,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742013_1189 (size=29229) 2024-11-28T00:41:56,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742013_1189 (size=29229) 2024-11-28T00:41:56,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742013_1189 (size=29229) 2024-11-28T00:41:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742014_1190 (size=903861) 2024-11-28T00:41:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742014_1190 (size=903861) 2024-11-28T00:41:56,464 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742014_1190 (size=903861) 2024-11-28T00:41:56,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742015_1191 (size=5175431) 2024-11-28T00:41:56,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742015_1191 (size=5175431) 2024-11-28T00:41:56,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742015_1191 (size=5175431) 2024-11-28T00:41:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742016_1192 (size=232881) 2024-11-28T00:41:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742016_1192 (size=232881) 2024-11-28T00:41:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742016_1192 (size=232881) 2024-11-28T00:41:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742017_1193 (size=1323991) 2024-11-28T00:41:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742017_1193 (size=1323991) 2024-11-28T00:41:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742017_1193 (size=1323991) 2024-11-28T00:41:56,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742018_1194 (size=4695811) 2024-11-28T00:41:56,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742018_1194 (size=4695811) 2024-11-28T00:41:56,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742018_1194 (size=4695811) 2024-11-28T00:41:56,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742019_1195 (size=1877034) 2024-11-28T00:41:56,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742019_1195 (size=1877034) 2024-11-28T00:41:56,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742019_1195 (size=1877034) 2024-11-28T00:41:56,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742020_1196 (size=217555) 2024-11-28T00:41:56,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742020_1196 (size=217555) 2024-11-28T00:41:56,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742020_1196 (size=217555) 2024-11-28T00:41:56,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742021_1197 (size=440956) 2024-11-28T00:41:56,570 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742021_1197 (size=440956) 2024-11-28T00:41:56,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742021_1197 (size=440956) 2024-11-28T00:41:56,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742022_1198 (size=4188619) 2024-11-28T00:41:56,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742022_1198 (size=4188619) 2024-11-28T00:41:56,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742022_1198 (size=4188619) 2024-11-28T00:41:56,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742023_1199 (size=127628) 2024-11-28T00:41:56,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742023_1199 (size=127628) 2024-11-28T00:41:56,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742023_1199 (size=127628) 2024-11-28T00:41:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742024_1200 (size=6424743) 2024-11-28T00:41:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742024_1200 (size=6424743) 2024-11-28T00:41:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742024_1200 (size=6424743) 2024-11-28T00:41:56,615 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-28T00:41:56,617 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-28T00:41:56,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-11-28T00:41:56,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:41:56,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.5 K 2024-11-28T00:41:56,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:41:56,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:41:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:41:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:41:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:41:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:41:56,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:41:56,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742027_1203 (size=304071) 2024-11-28T00:41:56,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742027_1203 (size=304071) 2024-11-28T00:41:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742027_1203 (size=304071) 2024-11-28T00:41:56,668 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:41:56,668 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:41:56,879 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
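[Editor's note] The "export split=N size=..." lines above show ExportSnapshot spreading the snapshot's four hfiles (14.4 K, 8.0 K, 5.5 K and 4.9 K, matching blocks _1167, _1166, _1168 and _1165 earlier in the log) across map tasks by size. The snippet below is only a simplified greedy illustration of that kind of size balancing, not ExportSnapshot's actual grouping code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SplitBalanceSketch {
  // Greedily assign file sizes to nGroups: largest file first, into the lightest group.
  static List<List<Long>> balance(long[] sizes, int nGroups) {
    long[] sorted = sizes.clone();
    Arrays.sort(sorted); // ascending; we will walk it backwards
    List<List<Long>> groups = new ArrayList<>();
    long[] totals = new long[nGroups];
    for (int i = 0; i < nGroups; i++) groups.add(new ArrayList<>());
    for (int i = sorted.length - 1; i >= 0; i--) {
      int lightest = 0;
      for (int g = 1; g < nGroups; g++) {
        if (totals[g] < totals[lightest]) lightest = g;
      }
      groups.get(lightest).add(sorted[i]);
      totals[lightest] += sorted[i];
    }
    return groups;
  }

  public static void main(String[] args) {
    // File sizes in bytes, taken from the log (14714, 8242, 5679, 5032).
    long[] sizes = { 14714, 8242, 5679, 5032 };
    // With four groups each file lands in its own split, as in the log above.
    System.out.println(balance(sizes, 4));
  }
}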
2024-11-28T00:41:56,925 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:50900 2024-11-28T00:41:57,060 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:41:58,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-28T00:41:58,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:41:58,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-28T00:41:58,443 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:41:58,444 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-28T00:42:01,509 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:39222 2024-11-28T00:42:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742028_1204 (size=349769) 2024-11-28T00:42:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742028_1204 (size=349769) 2024-11-28T00:42:01,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742028_1204 (size=349769) 2024-11-28T00:42:03,710 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:46940 2024-11-28T00:42:03,710 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:57596 2024-11-28T00:42:03,946 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:42:04,583 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:57608 2024-11-28T00:42:04,584 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:46944 2024-11-28T00:42:06,963 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0003_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:42:09,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742029_1205 (size=14714) 2024-11-28T00:42:09,649 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742029_1205 (size=14714) 2024-11-28T00:42:09,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742029_1205 (size=14714) 2024-11-28T00:42:10,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742031_1207 (size=8242) 2024-11-28T00:42:10,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742031_1207 (size=8242) 2024-11-28T00:42:10,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742031_1207 (size=8242) 2024-11-28T00:42:11,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000003/launch_container.sh] 2024-11-28T00:42:11,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000003/container_tokens] 2024-11-28T00:42:11,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000003/sysfs] 2024-11-28T00:42:11,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742032_1208 (size=5032) 2024-11-28T00:42:11,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742032_1208 (size=5032) 2024-11-28T00:42:11,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742032_1208 (size=5032) 2024-11-28T00:42:11,750 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000005/launch_container.sh] 2024-11-28T00:42:11,750 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000005/container_tokens] 2024-11-28T00:42:11,750 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000005/sysfs] 2024-11-28T00:42:11,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742033_1209 (size=5679) 2024-11-28T00:42:11,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742033_1209 (size=5679) 2024-11-28T00:42:11,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742033_1209 (size=5679) 2024-11-28T00:42:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742030_1206 (size=31710) 2024-11-28T00:42:11,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742030_1206 (size=31710) 2024-11-28T00:42:11,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742030_1206 (size=31710) 2024-11-28T00:42:11,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:42:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:42:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:42:11,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742035_1211 (size=31710) 2024-11-28T00:42:11,912 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000004/launch_container.sh] 2024-11-28T00:42:11,912 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000004/container_tokens] 2024-11-28T00:42:11,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742035_1211 (size=31710) 2024-11-28T00:42:11,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742035_1211 (size=31710) 2024-11-28T00:42:11,913 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000004/sysfs] 2024-11-28T00:42:11,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742036_1212 (size=349769) 2024-11-28T00:42:11,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742036_1212 (size=349769) 2024-11-28T00:42:11,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742036_1212 (size=349769) 2024-11-28T00:42:11,947 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:46724 2024-11-28T00:42:11,960 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:59648 2024-11-28T00:42:13,861 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:42:13,862 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:42:13,868 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-11-28T00:42:13,868 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:42:13,869 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:42:13,869 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:42:13,869 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-28T00:42:13,869 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-28T00:42:13,869 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:42:13,870 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-28T00:42:13,870 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754515220/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-28T00:42:13,878 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportWithResetTtl 2024-11-28T00:42:14,353 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:34884 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 474ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/, blockId=1073741830, seqno=689 2024-11-28T00:42:14,353 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:54774 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 474ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/, blockId=1073741830, seqno=689 2024-11-28T00:42:14,353 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:55952 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 474ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/, blockId=1073741830, seqno=689 2024-11-28T00:42:14,353 INFO [AsyncFSWAL-0-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980 {}] wal.AbstractFSWAL(1368): Slow sync cost: 474 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK], DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK], DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK]] 2024-11-28T00:42:14,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:42:14,356 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754534356"}]},"ts":"1732754534356"} 2024-11-28T00:42:14,359 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-28T00:42:14,359 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-28T00:42:14,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-28T00:42:14,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, UNASSIGN}] 2024-11-28T00:42:14,363 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, UNASSIGN 2024-11-28T00:42:14,364 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, UNASSIGN 2024-11-28T00:42:14,365 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=0b28daf4d4c5becdc3e9e69ec3afa3fa, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:14,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=6885cdc89d76dd248176a0e4cf7684f8, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:42:14,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, UNASSIGN because future has completed 2024-11-28T00:42:14,367 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:14,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:42:14,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, UNASSIGN because future has completed 2024-11-28T00:42:14,368 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:14,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:42:14,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:42:14,521 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:42:14,521 DEBUG 
[RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:14,521 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 0b28daf4d4c5becdc3e9e69ec3afa3fa, disabling compactions & flushes 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:14,521 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 6885cdc89d76dd248176a0e4cf7684f8, disabling compactions & flushes 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. after waiting 0 ms 2024-11-28T00:42:14,521 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 2024-11-28T00:42:14,521 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:42:14,522 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:42:14,522 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. after waiting 0 ms 2024-11-28T00:42:14,522 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 
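Earlier in this block (00:42:13,861 through 00:42:13,870) ExportSnapshot finalizes the export of snaptb-testExportWithResetTtl, verifies its expiration status and integrity, and the test then lists .snapshotinfo and data.manifest under both the source .hbase-snapshot directory and the export target. For reference, an export like this can also be driven programmatically through the same tool; the following is only a rough sketch, with a hypothetical destination URI and option names as used by recent HBase releases, not the invocation this test actually performs.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical export destination; the run above exports under an
        // hdfs://localhost:44423/.../export-test/export-<timestamp> directory instead.
        String copyTo = "hdfs://backup-nn:8020/hbase-exports";

        // ExportSnapshot is a Hadoop Tool, so it can be run through ToolRunner
        // as well as from the `hbase` command line.
        int rc = ToolRunner.run(conf, new ExportSnapshot(),
            new String[] { "--snapshot", "snaptb-testExportWithResetTtl", "--copy-to", copyTo });
        if (rc != 0) {
          throw new IllegalStateException("ExportSnapshot exited with " + rc);
        }

        // Spot-check that the exported snapshot directory contains the expected
        // metadata files (.snapshotinfo, data.manifest), mirroring the listing
        // the test performs in the records above.
        FileSystem fs = FileSystem.get(URI.create(copyTo), conf);
        Path exported = new Path(copyTo + "/.hbase-snapshot/snaptb-testExportWithResetTtl");
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }

The destination layout mirrors what the log shows for the test's own export root, where the snapshot metadata lands under <copy-to>/.hbase-snapshot/<snapshot-name>.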
2024-11-28T00:42:14,530 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:42:14,530 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:42:14,531 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:14,531 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8. 2024-11-28T00:42:14,531 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 6885cdc89d76dd248176a0e4cf7684f8: Waiting for close lock at 1732754534521Running coprocessor pre-close hooks at 1732754534521Disabling compacts and flushes for region at 1732754534521Disabling writes for close at 1732754534522 (+1 ms)Writing region close event to WAL at 1732754534523 (+1 ms)Running coprocessor post-close hooks at 1732754534530 (+7 ms)Closed at 1732754534531 (+1 ms) 2024-11-28T00:42:14,531 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:14,531 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa. 
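The records above trace the master-side fan-out of a table disable: DisableTableProcedure (pid=82) schedules CloseTableRegionsProcedure (pid=83), which in turn schedules TransitRegionStateProcedure and CloseRegionProcedure steps (pids 84-87) that close the two regions on their region servers and update hbase:meta. From a client's point of view this whole chain is started by a single Admin call; the following is a minimal client-side sketch with illustrative setup, not the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportWithResetTtl");
          if (admin.isTableEnabled(tn)) {
            // Blocks until the master's DisableTableProcedure and its
            // CloseRegionProcedure subprocedures have finished; the repeated
            // "Checking to see if procedure is done pid=82" records above are
            // the client polling for exactly that completion.
            admin.disableTable(tn);
          }
        }
      }
    }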
2024-11-28T00:42:14,531 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 0b28daf4d4c5becdc3e9e69ec3afa3fa: Waiting for close lock at 1732754534521Running coprocessor pre-close hooks at 1732754534521Disabling compacts and flushes for region at 1732754534521Disabling writes for close at 1732754534521Writing region close event to WAL at 1732754534523 (+2 ms)Running coprocessor post-close hooks at 1732754534531 (+8 ms)Closed at 1732754534531 2024-11-28T00:42:14,533 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:42:14,533 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=6885cdc89d76dd248176a0e4cf7684f8, regionState=CLOSED 2024-11-28T00:42:14,533 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:42:14,534 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=0b28daf4d4c5becdc3e9e69ec3afa3fa, regionState=CLOSED 2024-11-28T00:42:14,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:42:14,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:42:14,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-11-28T00:42:14,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 6885cdc89d76dd248176a0e4cf7684f8, server=97f25515df69,34347,1732754369169 in 168 msec 2024-11-28T00:42:14,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6885cdc89d76dd248176a0e4cf7684f8, UNASSIGN in 179 msec 2024-11-28T00:42:14,543 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-28T00:42:14,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 0b28daf4d4c5becdc3e9e69ec3afa3fa, server=97f25515df69,43795,1732754369084 in 173 msec 2024-11-28T00:42:14,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-11-28T00:42:14,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0b28daf4d4c5becdc3e9e69ec3afa3fa, UNASSIGN in 181 msec 2024-11-28T00:42:14,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-11-28T00:42:14,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 185 msec 2024-11-28T00:42:14,548 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754534548"}]},"ts":"1732754534548"} 2024-11-28T00:42:14,549 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-28T00:42:14,549 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-28T00:42:14,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 672 msec 2024-11-28T00:42:14,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:42:14,668 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:42:14,668 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportWithResetTtl 2024-11-28T00:42:14,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,670 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-28T00:42:14,671 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-28T00:42:14,676 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:42:14,676 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:42:14,677 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/recovered.edits] 2024-11-28T00:42:14,677 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/recovered.edits] 2024-11-28T00:42:14,680 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/cf/d614f665d0234489b13f5a75ded078cf 2024-11-28T00:42:14,680 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/cf/f9d4e02b2b65447e81c0dc6b00b35723 2024-11-28T00:42:14,683 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/recovered.edits/8.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8/recovered.edits/8.seqid 2024-11-28T00:42:14,683 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/recovered.edits/8.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa/recovered.edits/8.seqid 2024-11-28T00:42:14,683 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:42:14,683 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportWithResetTtl/0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:42:14,683 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-28T00:42:14,684 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-28T00:42:14,684 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-28T00:42:14,687 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128b011514b64dc493a938b20bf934e6040_6885cdc89d76dd248176a0e4cf7684f8 2024-11-28T00:42:14,688 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128d35448b0042147da972dfdf64905632f_0b28daf4d4c5becdc3e9e69ec3afa3fa 2024-11-28T00:42:14,688 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-28T00:42:14,690 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,693 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-28T00:42:14,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:14,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:14,968 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:14,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:14,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:42:14,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:42:14,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:42:14,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportWithResetTtl with data PBUF 2024-11-28T00:42:14,972 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-28T00:42:14,974 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,974 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-28T00:42:14,975 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754534974"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:14,975 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754534974"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:14,978 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:42:14,978 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0b28daf4d4c5becdc3e9e69ec3afa3fa, NAME => 'testExportWithResetTtl,,1732754514190.0b28daf4d4c5becdc3e9e69ec3afa3fa.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6885cdc89d76dd248176a0e4cf7684f8, NAME => 'testExportWithResetTtl,1,1732754514190.6885cdc89d76dd248176a0e4cf7684f8.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:42:14,978 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
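The DeleteTableProcedure above (pid=88) moves the region and MOB store files into the archive/ tree via HFileArchiver rather than deleting them outright, deletes the region rows and table state from hbase:meta, removes the table descriptor, and clears the ACL entry, which the ZKPermissionWatcher events show propagating to the region servers. On the client side this is again one call, which requires the table to already be disabled; a minimal sketch with illustrative names follows.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportWithResetTtl");
          // deleteTable only succeeds on a disabled table; the server side then
          // runs the HFileArchiver moves and hbase:meta deletes recorded above.
          if (admin.tableExists(tn) && admin.isTableDisabled(tn)) {
            admin.deleteTable(tn);
          }
          System.out.println("table still present: " + admin.tableExists(tn));
        }
      }
    }

Because the store files are archived rather than removed, snapshots that reference them (such as the exported snaptb-testExportWithResetTtl) remain readable after the table itself is gone.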
2024-11-28T00:42:14,979 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754534978"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:14,981 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-28T00:42:14,982 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:42:14,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 314 msec 2024-11-28T00:42:15,080 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000002/launch_container.sh] 2024-11-28T00:42:15,080 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000002/container_tokens] 2024-11-28T00:42:15,080 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000002/sysfs] 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:15,168 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,168 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,169 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:15,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-28T00:42:15,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:15,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:15,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:15,170 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-28T00:42:15,170 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:42:15,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithResetTtl 2024-11-28T00:42:15,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:42:15,173 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754535173"}]},"ts":"1732754535173"} 2024-11-28T00:42:15,175 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-28T00:42:15,175 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-28T00:42:15,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, 
ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-28T00:42:15,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, UNASSIGN}] 2024-11-28T00:42:15,178 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, UNASSIGN 2024-11-28T00:42:15,178 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, UNASSIGN 2024-11-28T00:42:15,178 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=cb80a9dac6c2e528157cc1200bb2f63a, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:42:15,178 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=4dca358a081c535cc775fd96f8b16a8c, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:15,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, UNASSIGN because future has completed 2024-11-28T00:42:15,180 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:15,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:42:15,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, UNASSIGN because future has completed 2024-11-28T00:42:15,182 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:15,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:42:15,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:42:15,333 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 
4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 4dca358a081c535cc775fd96f8b16a8c, disabling compactions & flushes 2024-11-28T00:42:15,334 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. after waiting 0 ms 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:42:15,334 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:42:15,334 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:15,335 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing cb80a9dac6c2e528157cc1200bb2f63a, disabling compactions & flushes 2024-11-28T00:42:15,335 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:42:15,335 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 2024-11-28T00:42:15,335 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. after waiting 0 ms 2024-11-28T00:42:15,335 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
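The pid=91/92 records above show the two regions of testtb-testExportWithResetTtl being closed on different region servers (regionLocation=97f25515df69,38585,... and 97f25515df69,43795,...). Region placement of that kind can be inspected from a client with a RegionLocator; the sketch below is illustrative only and assumes the table still exists.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          // One line per region: encoded region name plus the region server
          // currently hosting it, matching the regionLocation fields in the log.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }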
2024-11-28T00:42:15,346 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:42:15,347 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:15,347 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c. 2024-11-28T00:42:15,347 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 4dca358a081c535cc775fd96f8b16a8c: Waiting for close lock at 1732754535334Running coprocessor pre-close hooks at 1732754535334Disabling compacts and flushes for region at 1732754535334Disabling writes for close at 1732754535334Writing region close event to WAL at 1732754535340 (+6 ms)Running coprocessor post-close hooks at 1732754535347 (+7 ms)Closed at 1732754535347 2024-11-28T00:42:15,351 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:42:15,351 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=4dca358a081c535cc775fd96f8b16a8c, regionState=CLOSED 2024-11-28T00:42:15,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:42:15,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-11-28T00:42:15,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 4dca358a081c535cc775fd96f8b16a8c, server=97f25515df69,43795,1732754369084 in 175 msec 2024-11-28T00:42:15,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4dca358a081c535cc775fd96f8b16a8c, UNASSIGN in 181 msec 2024-11-28T00:42:15,360 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:42:15,361 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:15,361 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a. 
2024-11-28T00:42:15,361 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for cb80a9dac6c2e528157cc1200bb2f63a: Waiting for close lock at 1732754535335Running coprocessor pre-close hooks at 1732754535335Disabling compacts and flushes for region at 1732754535335Disabling writes for close at 1732754535335Writing region close event to WAL at 1732754535345 (+10 ms)Running coprocessor post-close hooks at 1732754535361 (+16 ms)Closed at 1732754535361 2024-11-28T00:42:15,363 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:42:15,364 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=cb80a9dac6c2e528157cc1200bb2f63a, regionState=CLOSED 2024-11-28T00:42:15,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:42:15,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-11-28T00:42:15,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure cb80a9dac6c2e528157cc1200bb2f63a, server=97f25515df69,38585,1732754368938 in 187 msec 2024-11-28T00:42:15,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-28T00:42:15,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=cb80a9dac6c2e528157cc1200bb2f63a, UNASSIGN in 194 msec 2024-11-28T00:42:15,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-11-28T00:42:15,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 198 msec 2024-11-28T00:42:15,377 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754535376"}]},"ts":"1732754535376"} 2024-11-28T00:42:15,379 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-28T00:42:15,379 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-28T00:42:15,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 209 msec 2024-11-28T00:42:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:42:15,488 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:42:15,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithResetTtl 2024-11-28T00:42:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,490 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-28T00:42:15,492 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,494 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-28T00:42:15,496 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:42:15,496 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:42:15,498 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/recovered.edits] 2024-11-28T00:42:15,498 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/recovered.edits] 2024-11-28T00:42:15,502 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/cf/2697cb5e324a4d6c9b7fb6068806b0fa 2024-11-28T00:42:15,502 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/cf/d61d31a7b43c4ecfa73a7cf444fe25b4 2024-11-28T00:42:15,505 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a/recovered.edits/9.seqid 2024-11-28T00:42:15,505 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c/recovered.edits/9.seqid 2024-11-28T00:42:15,506 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:42:15,506 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithResetTtl/4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:42:15,506 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-28T00:42:15,506 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-28T00:42:15,507 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-28T00:42:15,511 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241128c2396ded687d4905b379720dd20aa855_4dca358a081c535cc775fd96f8b16a8c 2024-11-28T00:42:15,512 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411282327e41cbcd04e03b7b881a9045ebc3f_cb80a9dac6c2e528157cc1200bb2f63a 2024-11-28T00:42:15,512 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-28T00:42:15,515 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,518 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-28T00:42:15,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,568 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-28T00:42:15,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-28T00:42:15,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-28T00:42:15,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-28T00:42:15,570 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-28T00:42:15,571 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,571 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
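
The entries above trace a client-initiated teardown of testtb-testExportWithResetTtl: the DisableTableProcedure (pid=89) completes first, then the DeleteTableProcedure (pid=95) archives the region and MOB store files, clears the hbase:acl entry, and begins removing the table from hbase:meta. For orientation only, a minimal client-side sketch of the Admin calls that drive this kind of disable-then-delete sequence, assuming the stock HBase 2.x client API (the class name and connection setup are illustrative, not taken from the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.tableExists(tn)) {
            // A table must be disabled before it can be deleted; the master runs
            // DisableTableProcedure and only then accepts the delete request.
            if (!admin.isTableDisabled(tn)) {
              admin.disableTable(tn);
            }
            // Triggers DeleteTableProcedure: archive region/MOB files, clear the
            // acl entry, and remove the table's rows from hbase:meta.
            admin.deleteTable(tn);
          }
        }
      }
    }

deleteTable rejects an enabled table, which is why the DISABLE operation is reported complete before the delete request is stored as pid=95.
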
2024-11-28T00:42:15,571 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754535571"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:15,571 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754535571"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:15,573 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:42:15,573 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cb80a9dac6c2e528157cc1200bb2f63a, NAME => 'testtb-testExportWithResetTtl,,1732754512110.cb80a9dac6c2e528157cc1200bb2f63a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4dca358a081c535cc775fd96f8b16a8c, NAME => 'testtb-testExportWithResetTtl,1,1732754512110.4dca358a081c535cc775fd96f8b16a8c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:42:15,574 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-28T00:42:15,574 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754535574"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:15,576 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-28T00:42:15,577 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:42:15,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,578 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,579 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,579 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:42:15,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-28T00:42:15,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 88 msec 2024-11-28T00:42:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-28T00:42:15,688 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-28T00:42:15,689 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:42:15,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-28T00:42:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-28T00:42:15,705 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-28T00:42:15,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-28T00:42:15,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-28T00:42:15,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-28T00:42:15,739 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=796 (was 790) Potentially hanging thread: Thread-3052 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45857 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:58056 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42671 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:36898 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42671 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:53584 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46587 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 128957) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? 
-, OpenFileDescriptor=805 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=670 (was 610) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 26), AvailableMemoryMB=2791 (was 2789) - AvailableMemoryMB LEAK? - 2024-11-28T00:42:15,739 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=796 is superior to 500 2024-11-28T00:42:15,755 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=796, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=670, ProcessCount=23, AvailableMemoryMB=2790 2024-11-28T00:42:15,755 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=796 is superior to 500 2024-11-28T00:42:15,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:42:15,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:15,760 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:42:15,760 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-11-28T00:42:15,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:42:15,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:42:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:42:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:42:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:42:15,780 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 39b7538db3356e20c5d1405f3b3acf6f, NAME => 'testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:15,781 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e95609bef7d23641911d24e81df6cd5a, NAME => 'testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:15,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:42:15,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:42:15,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:42:15,818 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:15,818 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 39b7538db3356e20c5d1405f3b3acf6f, disabling compactions & flushes 2024-11-28T00:42:15,819 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:15,819 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:15,819 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. after waiting 0 ms 2024-11-28T00:42:15,819 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:15,819 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 
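
The descriptor logged for testtb-testExportFileSystemState uses a single MOB-enabled family ('cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and one split key, '1', which is why exactly two regions (STARTKEY '' to '1', and '1' to '') are initialized above. A rough sketch of how a table like this could be declared through the HBase 2.x client API, purely for illustration (names are hypothetical; this is not the test's own setup code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Family 'cf' with MOB enabled and threshold 0, mirroring the
          // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' attributes.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setColumnFamily(cf)
              .build();
          // A single split key '1' produces the two regions ('' .. '1') and ('1' .. '').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

With MOB_THRESHOLD at 0, effectively every cell written to 'cf' ends up in MOB files under mobdir, which is what produced the mobdir archive entries seen during the earlier table deletion.
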
2024-11-28T00:42:15,819 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 39b7538db3356e20c5d1405f3b3acf6f: Waiting for close lock at 1732754535818Disabling compacts and flushes for region at 1732754535818Disabling writes for close at 1732754535819 (+1 ms)Writing region close event to WAL at 1732754535819Closed at 1732754535819 2024-11-28T00:42:15,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:42:15,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:42:15,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing e95609bef7d23641911d24e81df6cd5a, disabling compactions & flushes 2024-11-28T00:42:15,825 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. after waiting 0 ms 2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:15,825 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 
2024-11-28T00:42:15,825 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for e95609bef7d23641911d24e81df6cd5a: Waiting for close lock at 1732754535825Disabling compacts and flushes for region at 1732754535825Disabling writes for close at 1732754535825Writing region close event to WAL at 1732754535825Closed at 1732754535825 2024-11-28T00:42:15,827 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:42:15,829 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732754535827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754535827"}]},"ts":"1732754535827"} 2024-11-28T00:42:15,829 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732754535827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754535827"}]},"ts":"1732754535827"} 2024-11-28T00:42:15,833 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:42:15,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:42:15,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754535834"}]},"ts":"1732754535834"} 2024-11-28T00:42:15,837 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-28T00:42:15,837 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:42:15,839 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:42:15,839 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:42:15,839 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:42:15,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:42:15,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, ASSIGN}] 2024-11-28T00:42:15,841 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, ASSIGN 2024-11-28T00:42:15,841 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, ASSIGN 2024-11-28T00:42:15,842 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:42:15,842 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:42:15,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:42:15,992 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:42:15,993 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=e95609bef7d23641911d24e81df6cd5a, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:42:15,994 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=39b7538db3356e20c5d1405f3b3acf6f, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:15,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, ASSIGN because future has completed 2024-11-28T00:42:15,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:42:15,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, ASSIGN because future has completed 2024-11-28T00:42:15,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:42:16,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:42:16,156 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:16,156 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => e95609bef7d23641911d24e81df6cd5a, NAME => 'testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:42:16,156 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. service=AccessControlService 2024-11-28T00:42:16,156 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:42:16,157 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,157 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:16,157 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,157 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,160 INFO [StoreOpener-e95609bef7d23641911d24e81df6cd5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,160 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:16,160 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 39b7538db3356e20c5d1405f3b3acf6f, NAME => 'testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:42:16,160 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. service=AccessControlService 2024-11-28T00:42:16,161 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:42:16,161 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,161 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:16,161 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,161 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,163 INFO [StoreOpener-39b7538db3356e20c5d1405f3b3acf6f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,164 INFO [StoreOpener-e95609bef7d23641911d24e81df6cd5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e95609bef7d23641911d24e81df6cd5a columnFamilyName cf 2024-11-28T00:42:16,167 INFO [StoreOpener-39b7538db3356e20c5d1405f3b3acf6f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 39b7538db3356e20c5d1405f3b3acf6f columnFamilyName cf 2024-11-28T00:42:16,167 DEBUG [StoreOpener-e95609bef7d23641911d24e81df6cd5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:16,167 INFO [StoreOpener-e95609bef7d23641911d24e81df6cd5a-1 {}] regionserver.HStore(327): Store=e95609bef7d23641911d24e81df6cd5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:42:16,168 DEBUG 
[StoreOpener-39b7538db3356e20c5d1405f3b3acf6f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:16,168 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,168 INFO [StoreOpener-39b7538db3356e20c5d1405f3b3acf6f-1 {}] regionserver.HStore(327): Store=39b7538db3356e20c5d1405f3b3acf6f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:42:16,169 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,169 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,169 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,170 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,170 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,170 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,170 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,171 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,171 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,172 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,174 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,175 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:42:16,176 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened e95609bef7d23641911d24e81df6cd5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59710243, jitterRate=-0.11024804413318634}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:42:16,176 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:16,177 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for e95609bef7d23641911d24e81df6cd5a: Running coprocessor pre-open hook at 1732754536157Writing region info on filesystem at 1732754536157Initializing all the Stores at 1732754536159 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754536159Cleaning up temporary data from old regions at 1732754536170 (+11 ms)Running coprocessor post-open hooks at 1732754536176 (+6 ms)Region opened successfully at 1732754536177 (+1 ms) 2024-11-28T00:42:16,178 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a., pid=99, masterSystemTime=1732754536151 2024-11-28T00:42:16,182 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:16,182 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 
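
Region e95609bef7d23641911d24e81df6cd5a is now open (openSeqNum=2) and reported back to the master; the second region, 39b7538db3356e20c5d1405f3b3acf6f, completes its open just below. Once both are assigned, a client can wait for the table and check where the regions landed. A small sketch, again assuming the stock HBase 2.x client API rather than anything in this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class LocateRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Block until every region of the table is assigned and open.
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
          // Each location pairs an encoded region name with the region server
          // hosting it, matching the OPEN updates written to hbase:meta.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
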
2024-11-28T00:42:16,182 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=e95609bef7d23641911d24e81df6cd5a, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:42:16,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:42:16,193 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:42:16,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-11-28T00:42:16,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169 in 192 msec 2024-11-28T00:42:16,194 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 39b7538db3356e20c5d1405f3b3acf6f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63865129, jitterRate=-0.04833541810512543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:42:16,194 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:16,194 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 39b7538db3356e20c5d1405f3b3acf6f: Running coprocessor pre-open hook at 1732754536161Writing region info on filesystem at 1732754536161Initializing all the Stores at 1732754536162 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754536162Cleaning up temporary data from old regions at 1732754536171 (+9 ms)Running coprocessor post-open hooks at 1732754536194 (+23 ms)Region opened successfully at 1732754536194 2024-11-28T00:42:16,196 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f., pid=100, masterSystemTime=1732754536154 2024-11-28T00:42:16,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, ASSIGN in 354 msec 2024-11-28T00:42:16,198 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:16,199 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:16,199 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=39b7538db3356e20c5d1405f3b3acf6f, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:16,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:42:16,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-11-28T00:42:16,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084 in 209 msec 2024-11-28T00:42:16,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-28T00:42:16,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, ASSIGN in 371 msec 2024-11-28T00:42:16,218 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:42:16,218 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754536218"}]},"ts":"1732754536218"} 2024-11-28T00:42:16,225 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-28T00:42:16,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:42:16,232 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-28T00:42:16,236 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:42:16,368 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:16,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:16,368 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:16,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:42:16,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:16,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:16,421 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:16,421 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:16,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 663 msec 2024-11-28T00:42:16,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:42:16,898 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:42:16,898 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:16,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-28T00:42:16,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 
2024-11-28T00:42:16,903 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:42:16,907 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:16,918 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:16,939 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:16,944 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:42:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754536944 (current time:1732754536944). 2024-11-28T00:42:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:42:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:42:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:42:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d702b17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:16,960 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:16,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:16,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:16,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c31d8e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:16,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:16,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,963 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45036, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:16,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35139bcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:16,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:16,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:16,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48090, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:16,969 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:16,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:16,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,970 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:16,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@510d464f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:16,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:16,979 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:16,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:16,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:16,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75bfa50a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:16,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:16,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,987 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:16,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fcf5b34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:16,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:16,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:16,992 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48102, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:16,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:42:16,996 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:16,996 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:42:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:42:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:42:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-28T00:42:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:42:16,999 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:42:17,001 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:42:17,004 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:42:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:42:17,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:42:17,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:42:17,064 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:42:17,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a}] 2024-11-28T00:42:17,065 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:17,065 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:17,108 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:42:17,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-11-28T00:42:17,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 39b7538db3356e20c5d1405f3b3acf6f: 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for e95609bef7d23641911d24e81df6cd5a: 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. for emptySnaptb0-testExportFileSystemState completed. 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. for emptySnaptb0-testExportFileSystemState completed. 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:17,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:17,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:17,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:17,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:42:17,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:42:17,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:42:17,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:42:17,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:42:17,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:17,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-28T00:42:17,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-28T00:42:17,313 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:17,314 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:17,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a in 251 msec 2024-11-28T00:42:17,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:42:17,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:42:17,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:42:17,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:42:17,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:17,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-11-28T00:42:17,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-11-28T00:42:17,346 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:17,346 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:17,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-11-28T00:42:17,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f in 283 msec 2024-11-28T00:42:17,350 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:42:17,351 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:42:17,377 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:42:17,377 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:42:17,377 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:17,378 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:42:17,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:42:17,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:42:17,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:42:17,430 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:42:17,430 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:17,431 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:17,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:42:17,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:42:17,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:42:17,499 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:42:17,513 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:42:17,514 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:17,518 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:42:17,518 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-28T00:42:17,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 522 msec 2024-11-28T00:42:17,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:42:17,628 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:42:17,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:42:17,671 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:42:17,673 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:17,678 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-28T00:42:17,678 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 
2024-11-28T00:42:17,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:42:17,682 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:17,690 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:17,702 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:42:17,709 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:42:17,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754537709 (current time:1732754537709). 2024-11-28T00:42:17,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:42:17,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:42:17,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:42:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b319f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:17,723 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:17,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:17,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:17,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb94dff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-28T00:42:17,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:17,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:17,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,726 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:17,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f8eb3a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:17,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:17,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:17,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:17,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:17,731 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:17,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,732 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:17,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b2c7adb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:17,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:17,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:17,741 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:17,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:17,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:17,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d61716f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:17,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:17,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:17,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,743 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:17,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6820edf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:17,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:17,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:17,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:17,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48110, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:17,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:42:17,754 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:17,754 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:42:17,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:42:17,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:42:17,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-28T00:42:17,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:42:17,758 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:42:17,760 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:42:17,763 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:42:17,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:42:17,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:42:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:42:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:42:17,874 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:42:17,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a}] 2024-11-28T00:42:17,877 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:17,877 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:18,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-11-28T00:42:18,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-11-28T00:42:18,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:18,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:18,032 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 39b7538db3356e20c5d1405f3b3acf6f 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-28T00:42:18,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing e95609bef7d23641911d24e81df6cd5a 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-28T00:42:18,066 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0003_000001 (auth:SIMPLE) from 127.0.0.1:59654 2024-11-28T00:42:18,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a is 71, key is 13e7e98d9c047a6d137f13204b4751f2/cf:q/1732754537671/Put/seqid=0 2024-11-28T00:42:18,077 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000001/launch_container.sh] 2024-11-28T00:42:18,077 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000001/container_tokens] 2024-11-28T00:42:18,077 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0003/container_1732754375741_0003_01_000001/sysfs] 2024-11-28T00:42:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:42:18,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f is 71, key is 0c8535545420c4d84d9ff51125c6d675/cf:q/1732754537666/Put/seqid=0 2024-11-28T00:42:18,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742046_1222 (size=8031) 2024-11-28T00:42:18,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742046_1222 (size=8031) 2024-11-28T00:42:18,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742047_1223 (size=5241) 2024-11-28T00:42:18,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742047_1223 (size=5241) 2024-11-28T00:42:18,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742046_1222 (size=8031) 2024-11-28T00:42:18,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742047_1223 (size=5241) 2024-11-28T00:42:18,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:18,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:18,094 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:18,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/.tmp/cf/17a81b1190c646f18ffbaba914ade369, store: [table=testtb-testExportFileSystemState family=cf region=e95609bef7d23641911d24e81df6cd5a] 2024-11-28T00:42:18,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/.tmp/cf/17a81b1190c646f18ffbaba914ade369 is 209, key is 159eb31340d05ec213a369bb75e565cce/cf:q/1732754537671/Put/seqid=0 2024-11-28T00:42:18,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:18,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/.tmp/cf/b894a3b363154b4182b6188ca1f52656, store: [table=testtb-testExportFileSystemState family=cf region=39b7538db3356e20c5d1405f3b3acf6f] 2024-11-28T00:42:18,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/.tmp/cf/b894a3b363154b4182b6188ca1f52656 is 209, key is 0d4bf569147cf43978ce1160c4fdb7476/cf:q/1732754537666/Put/seqid=0 2024-11-28T00:42:18,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742048_1224 (size=14587) 2024-11-28T00:42:18,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742048_1224 (size=14587) 2024-11-28T00:42:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742048_1224 (size=14587) 2024-11-28T00:42:18,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/.tmp/cf/17a81b1190c646f18ffbaba914ade369 2024-11-28T00:42:18,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742049_1225 (size=6326) 
2024-11-28T00:42:18,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742049_1225 (size=6326) 2024-11-28T00:42:18,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/.tmp/cf/17a81b1190c646f18ffbaba914ade369 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369 2024-11-28T00:42:18,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742049_1225 (size=6326) 2024-11-28T00:42:18,127 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/.tmp/cf/b894a3b363154b4182b6188ca1f52656 2024-11-28T00:42:18,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369, entries=45, sequenceid=6, filesize=14.2 K 2024-11-28T00:42:18,137 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for e95609bef7d23641911d24e81df6cd5a in 105ms, sequenceid=6, compaction requested=false 2024-11-28T00:42:18,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-28T00:42:18,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/.tmp/cf/b894a3b363154b4182b6188ca1f52656 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656 2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for e95609bef7d23641911d24e81df6cd5a: 2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. for snaptb0-testExportFileSystemState completed. 
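Illustrative note (not part of the test output): the HMobStore and DefaultMobStoreFlusher entries above appear because the test table's 'cf' family is MOB-enabled, so flushed cells above the MOB threshold are written under the mobdir path in addition to the regular store file. A minimal sketch of declaring such a column family is below; the threshold value is an assumption for illustration, not a value taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)      // routes large cells through HMobStore, as logged above
            .setMobThreshold(102400L) // illustrative threshold only; not taken from the log
            .build())
        .build();
  }
}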
2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369] hfiles 2024-11-28T00:42:18,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369 for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:42:18,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656, entries=5, sequenceid=6, filesize=6.2 K 2024-11-28T00:42:18,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 39b7538db3356e20c5d1405f3b3acf6f in 114ms, sequenceid=6, compaction requested=false 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 39b7538db3356e20c5d1405f3b3acf6f: 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. for snaptb0-testExportFileSystemState completed. 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656] hfiles 2024-11-28T00:42:18,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656 for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:42:18,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:42:18,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:42:18,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:42:18,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 
2024-11-28T00:42:18,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-11-28T00:42:18,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-11-28T00:42:18,151 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:18,151 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:18,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e95609bef7d23641911d24e81df6cd5a in 304 msec 2024-11-28T00:42:18,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:42:18,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:42:18,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:42:18,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 
2024-11-28T00:42:18,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-28T00:42:18,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-11-28T00:42:18,217 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:18,217 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:18,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-11-28T00:42:18,221 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:42:18,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f in 344 msec 2024-11-28T00:42:18,223 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:42:18,224 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:42:18,225 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:42:18,225 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:18,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f] hfiles 2024-11-28T00:42:18,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:18,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:18,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:42:18,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:42:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:42:18,245 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:42:18,245 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-28T00:42:18,246 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:42:18,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:42:18,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:42:18,278 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:42:18,289 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:42:18,303 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:42:18,303 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:42:18,305 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:42:18,306 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-28T00:42:18,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 551 msec 2024-11-28T00:42:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:42:18,389 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:42:18,390 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389 2024-11-28T00:42:18,390 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:18,437 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:18,437 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:42:18,440 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:42:18,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-28T00:42:18,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-28T00:42:18,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-28T00:42:18,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-28T00:42:18,467 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:42:18,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:42:18,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:42:18,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:42:18,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:42:18,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:42:18,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:42:18,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:18,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 
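Illustrative note (not part of the test output): the ExportSnapshot entries above show the export being prepared (the manifest copied into the target .hbase-snapshot/.tmp directory), and the TableMapReduceUtil entries that continue below show dependency jars being resolved for the MapReduce job. A minimal sketch of driving the same public tool from code, using the snapshot name and export destination printed earlier in this log, could be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent of the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemState -copy-to <destination>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:44423/user/jenkins/test-data/"
            + "d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389"
    });
    System.exit(rc);
  }
}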
2024-11-28T00:42:18,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-1837747788247372510.jar 2024-11-28T00:42:19,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-13073981638201918334.jar 2024-11-28T00:42:19,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:19,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:42:19,707 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:42:19,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:42:19,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:42:19,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:42:19,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:42:19,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:42:19,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:42:19,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:42:19,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:42:19,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:42:19,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:19,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:19,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:19,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:19,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:19,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:19,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742056_1232 (size=24020) 2024-11-28T00:42:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742056_1232 (size=24020) 2024-11-28T00:42:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742056_1232 (size=24020) 2024-11-28T00:42:19,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742057_1233 (size=77755) 2024-11-28T00:42:19,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742057_1233 (size=77755) 2024-11-28T00:42:19,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742057_1233 (size=77755) 2024-11-28T00:42:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742058_1234 (size=131360) 2024-11-28T00:42:19,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742058_1234 (size=131360) 2024-11-28T00:42:19,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742058_1234 (size=131360) 2024-11-28T00:42:19,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742059_1235 (size=111793) 2024-11-28T00:42:19,794 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742059_1235 (size=111793) 2024-11-28T00:42:19,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742059_1235 (size=111793) 2024-11-28T00:42:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742060_1236 (size=1832290) 2024-11-28T00:42:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742060_1236 (size=1832290) 2024-11-28T00:42:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742060_1236 (size=1832290) 2024-11-28T00:42:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742061_1237 (size=8360005) 2024-11-28T00:42:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742061_1237 (size=8360005) 2024-11-28T00:42:19,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742061_1237 (size=8360005) 2024-11-28T00:42:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742062_1238 (size=503880) 2024-11-28T00:42:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742062_1238 (size=503880) 2024-11-28T00:42:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742062_1238 (size=503880) 2024-11-28T00:42:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742063_1239 (size=322274) 2024-11-28T00:42:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742063_1239 (size=322274) 2024-11-28T00:42:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742063_1239 (size=322274) 2024-11-28T00:42:19,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742064_1240 (size=20406) 2024-11-28T00:42:19,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742064_1240 (size=20406) 2024-11-28T00:42:19,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742064_1240 (size=20406) 2024-11-28T00:42:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742065_1241 (size=45609) 2024-11-28T00:42:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742065_1241 (size=45609) 2024-11-28T00:42:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742065_1241 (size=45609) 2024-11-28T00:42:19,865 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742066_1242 (size=136454) 2024-11-28T00:42:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742066_1242 (size=136454) 2024-11-28T00:42:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742066_1242 (size=136454) 2024-11-28T00:42:19,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742067_1243 (size=1597136) 2024-11-28T00:42:19,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742067_1243 (size=1597136) 2024-11-28T00:42:19,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742067_1243 (size=1597136) 2024-11-28T00:42:19,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742068_1244 (size=440956) 2024-11-28T00:42:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742068_1244 (size=440956) 2024-11-28T00:42:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742068_1244 (size=440956) 2024-11-28T00:42:19,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742069_1245 (size=30873) 2024-11-28T00:42:19,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742069_1245 (size=30873) 2024-11-28T00:42:19,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742069_1245 (size=30873) 2024-11-28T00:42:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742070_1246 (size=29229) 2024-11-28T00:42:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742070_1246 (size=29229) 2024-11-28T00:42:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742070_1246 (size=29229) 2024-11-28T00:42:19,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742071_1247 (size=903861) 2024-11-28T00:42:19,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742071_1247 (size=903861) 2024-11-28T00:42:19,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742071_1247 (size=903861) 2024-11-28T00:42:19,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:42:19,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:42:19,925 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:42:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742073_1249 (size=5175431) 2024-11-28T00:42:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742073_1249 (size=5175431) 2024-11-28T00:42:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742073_1249 (size=5175431) 2024-11-28T00:42:19,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742074_1250 (size=232881) 2024-11-28T00:42:19,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742074_1250 (size=232881) 2024-11-28T00:42:19,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742074_1250 (size=232881) 2024-11-28T00:42:19,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742075_1251 (size=1323991) 2024-11-28T00:42:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742075_1251 (size=1323991) 2024-11-28T00:42:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742075_1251 (size=1323991) 2024-11-28T00:42:19,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742076_1252 (size=4695811) 2024-11-28T00:42:19,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742076_1252 (size=4695811) 2024-11-28T00:42:19,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742076_1252 (size=4695811) 2024-11-28T00:42:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742077_1253 (size=1877034) 2024-11-28T00:42:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742077_1253 (size=1877034) 2024-11-28T00:42:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742077_1253 (size=1877034) 2024-11-28T00:42:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742078_1254 (size=217555) 2024-11-28T00:42:19,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742078_1254 (size=217555) 2024-11-28T00:42:19,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742078_1254 (size=217555) 2024-11-28T00:42:20,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742079_1255 (size=4188619) 
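Illustrative note (not part of the test output): the long run of "For class ..., using jar ..." entries, followed by the block reports above as those jars are written into HDFS, is the job's dependency staging performed by TableMapReduceUtil. A minimal sketch of the same call against a hypothetical job (the job name here is an assumption, not from the log) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static Job prepare() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jar-staging-sketch"); // hypothetical job name
    // Resolves the jar that provides each class the job needs and adds it to the
    // job's classpath, producing "For class ..., using jar ..." lines like those above.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}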
2024-11-28T00:42:20,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742079_1255 (size=4188619) 2024-11-28T00:42:20,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742079_1255 (size=4188619) 2024-11-28T00:42:20,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742080_1256 (size=127628) 2024-11-28T00:42:20,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742080_1256 (size=127628) 2024-11-28T00:42:20,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742080_1256 (size=127628) 2024-11-28T00:42:20,013 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:42:20,015 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-28T00:42:20,018 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-11-28T00:42:20,018 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-11-28T00:42:20,018 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-11-28T00:42:20,018 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-11-28T00:42:20,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:42:20,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:42:20,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:42:20,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:42:20,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:42:20,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:42:20,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742083_1259 (size=304085) 2024-11-28T00:42:20,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742083_1259 (size=304085) 2024-11-28T00:42:20,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742083_1259 (size=304085) 2024-11-28T00:42:20,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:42:20,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:42:20,078 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:42:20,735 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:46730 2024-11-28T00:42:25,761 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:37872 2024-11-28T00:42:26,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742084_1260 (size=349783) 2024-11-28T00:42:26,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742084_1260 (size=349783) 2024-11-28T00:42:26,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742084_1260 (size=349783) 2024-11-28T00:42:26,880 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:42:28,031 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:58528 2024-11-28T00:42:28,032 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:48724 2024-11-28T00:42:28,857 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:58540 2024-11-28T00:42:28,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:48734 2024-11-28T00:42:31,059 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0004_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:42:32,141 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e95609bef7d23641911d24e81df6cd5a changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:42:32,141 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 39b7538db3356e20c5d1405f3b3acf6f changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:42:34,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742085_1261 (size=8031) 2024-11-28T00:42:34,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742085_1261 (size=8031) 2024-11-28T00:42:34,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742085_1261 
(size=8031) 2024-11-28T00:42:34,417 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000003/launch_container.sh] 2024-11-28T00:42:34,417 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000003/container_tokens] 2024-11-28T00:42:34,417 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000003/sysfs] 2024-11-28T00:42:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742087_1263 (size=5241) 2024-11-28T00:42:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742087_1263 (size=5241) 2024-11-28T00:42:36,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742087_1263 (size=5241) 2024-11-28T00:42:36,483 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000005/launch_container.sh] 2024-11-28T00:42:36,483 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000005/container_tokens] 2024-11-28T00:42:36,483 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000005/sysfs] 2024-11-28T00:42:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742088_1264 (size=6326) 2024-11-28T00:42:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742088_1264 (size=6326) 2024-11-28T00:42:36,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46245 is added to blk_1073742088_1264 (size=6326) 2024-11-28T00:42:37,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742089_1265 (size=14587) 2024-11-28T00:42:37,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742089_1265 (size=14587) 2024-11-28T00:42:37,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742089_1265 (size=14587) 2024-11-28T00:42:38,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742086_1262 (size=31750) 2024-11-28T00:42:38,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742086_1262 (size=31750) 2024-11-28T00:42:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742086_1262 (size=31750) 2024-11-28T00:42:38,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:42:38,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:42:38,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:42:38,112 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000002/launch_container.sh] 2024-11-28T00:42:38,112 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000002/container_tokens] 2024-11-28T00:42:38,113 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000002/sysfs] 2024-11-28T00:42:38,124 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000004/launch_container.sh] 2024-11-28T00:42:38,125 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000004/container_tokens] 2024-11-28T00:42:38,125 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000004/sysfs] 2024-11-28T00:42:38,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742091_1267 (size=31750) 2024-11-28T00:42:38,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742091_1267 (size=31750) 2024-11-28T00:42:38,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742091_1267 (size=31750) 2024-11-28T00:42:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742092_1268 (size=349783) 2024-11-28T00:42:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742092_1268 (size=349783) 2024-11-28T00:42:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742092_1268 (size=349783) 2024-11-28T00:42:38,169 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:40144 2024-11-28T00:42:38,177 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:40154 2024-11-28T00:42:39,243 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:42:39,244 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
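At this point the MapReduce copy has finished and ExportSnapshot is finalizing and verifying the exported snapshot. For reference, an export like the one logged here is normally driven through the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool; the sketch below is an approximation of that invocation, not the test's actual harness code. The ToolRunner wiring and the --snapshot/--copy-to/--mappers flags are assumptions based on the tool's documented usage, and flag spelling (single vs. double dash) varies between HBase releases, so consult the tool's help output for your version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Sketch: export an existing snapshot to another filesystem location.
    // Snapshot name and destination mirror values seen in this log; the
    // surrounding class and main method are illustrative scaffolding.
    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportFileSystemState",
            // destination matching the export-test directory used in this run
            "--copy-to", "hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389",
            "--mappers", "4"   // matches the four splits planned earlier in the log
        });
        System.exit(rc);
      }
    }

The verification step logged above (".snapshotinfo" and "data.manifest" listed under both the source and the export root) is what the tool performs after the copy when target verification is enabled.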
2024-11-28T00:42:39,252 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-11-28T00:42:39,252 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:42:39,253 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:42:39,253 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:42:39,254 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-28T00:42:39,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-28T00:42:39,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:42:39,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-28T00:42:39,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754538389/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-28T00:42:39,277 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemState 2024-11-28T00:42:39,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:42:39,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754559283"}]},"ts":"1732754559283"} 2024-11-28T00:42:39,285 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-28T00:42:39,285 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-28T00:42:39,286 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-28T00:42:39,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, UNASSIGN}] 2024-11-28T00:42:39,290 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, UNASSIGN 2024-11-28T00:42:39,290 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, UNASSIGN 2024-11-28T00:42:39,291 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=e95609bef7d23641911d24e81df6cd5a, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:42:39,291 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=39b7538db3356e20c5d1405f3b3acf6f, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:39,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, UNASSIGN because future has completed 2024-11-28T00:42:39,294 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:39,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:42:39,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, UNASSIGN because future has completed 2024-11-28T00:42:39,296 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:42:39,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:42:39,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:42:39,449 INFO 
[RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:39,449 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:39,449 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 39b7538db3356e20c5d1405f3b3acf6f, disabling compactions & flushes 2024-11-28T00:42:39,449 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:39,449 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:39,449 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. after waiting 0 ms 2024-11-28T00:42:39,449 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:39,449 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:39,450 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:42:39,450 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing e95609bef7d23641911d24e81df6cd5a, disabling compactions & flushes 2024-11-28T00:42:39,450 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:39,450 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 2024-11-28T00:42:39,450 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. after waiting 0 ms 2024-11-28T00:42:39,450 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 
2024-11-28T00:42:39,509 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:42:39,510 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:39,510 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f. 2024-11-28T00:42:39,510 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 39b7538db3356e20c5d1405f3b3acf6f: Waiting for close lock at 1732754559449Running coprocessor pre-close hooks at 1732754559449Disabling compacts and flushes for region at 1732754559449Disabling writes for close at 1732754559449Writing region close event to WAL at 1732754559481 (+32 ms)Running coprocessor post-close hooks at 1732754559509 (+28 ms)Closed at 1732754559510 (+1 ms) 2024-11-28T00:42:39,514 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=39b7538db3356e20c5d1405f3b3acf6f, regionState=CLOSED 2024-11-28T00:42:39,515 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:39,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:42:39,518 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:42:39,519 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:42:39,519 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a. 
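With both regions now closed, the remaining entries show the table being marked DISABLED, deleted (its regions, MOB files and ACL entries archived or cleaned up), and the two snapshots removed. A minimal sketch of the client-side teardown that would drive this sequence, assuming the standard Admin API; the test's own utility methods may wrap these calls differently, and the wrapper class below is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the disable/delete/cleanup sequence reflected in the log.
    public class SnapshotTestTeardownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table);   // DisableTableProcedure (pid=107 in this log)
          admin.deleteTable(table);    // DeleteTableProcedure (pid=113); archives HFiles and MOB files
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }

Both disableTable and deleteTable block until the corresponding master procedure completes, which is why the client-side RPC handler keeps logging "Checking to see if procedure is done" for pid=107 and pid=113 in the entries that follow.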
2024-11-28T00:42:39,519 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for e95609bef7d23641911d24e81df6cd5a: Waiting for close lock at 1732754559450Running coprocessor pre-close hooks at 1732754559450Disabling compacts and flushes for region at 1732754559450Disabling writes for close at 1732754559450Writing region close event to WAL at 1732754559504 (+54 ms)Running coprocessor post-close hooks at 1732754559518 (+14 ms)Closed at 1732754559519 (+1 ms) 2024-11-28T00:42:39,522 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:39,523 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=e95609bef7d23641911d24e81df6cd5a, regionState=CLOSED 2024-11-28T00:42:39,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:42:39,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-11-28T00:42:39,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 39b7538db3356e20c5d1405f3b3acf6f, server=97f25515df69,43795,1732754369084 in 224 msec 2024-11-28T00:42:39,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=39b7538db3356e20c5d1405f3b3acf6f, UNASSIGN in 238 msec 2024-11-28T00:42:39,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-11-28T00:42:39,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure e95609bef7d23641911d24e81df6cd5a, server=97f25515df69,34347,1732754369169 in 234 msec 2024-11-28T00:42:39,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-11-28T00:42:39,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e95609bef7d23641911d24e81df6cd5a, UNASSIGN in 244 msec 2024-11-28T00:42:39,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-28T00:42:39,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 249 msec 2024-11-28T00:42:39,540 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754559540"}]},"ts":"1732754559540"} 2024-11-28T00:42:39,542 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-28T00:42:39,543 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set 
testtb-testExportFileSystemState to state=DISABLED 2024-11-28T00:42:39,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 267 msec 2024-11-28T00:42:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:42:39,599 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:42:39,600 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemState 2024-11-28T00:42:39,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,603 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-28T00:42:39,608 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,613 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-28T00:42:39,616 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:39,618 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/recovered.edits] 2024-11-28T00:42:39,623 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/cf/b894a3b363154b4182b6188ca1f52656 2024-11-28T00:42:39,627 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:39,628 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f/recovered.edits/9.seqid 2024-11-28T00:42:39,629 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:39,631 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/recovered.edits] 2024-11-28T00:42:39,637 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/cf/17a81b1190c646f18ffbaba914ade369 2024-11-28T00:42:39,640 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a/recovered.edits/9.seqid 2024-11-28T00:42:39,640 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemState/e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:39,640 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-28T00:42:39,641 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-28T00:42:39,642 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-28T00:42:39,645 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411287252b6aaf66d4f3cada1a7192d9a06cd_e95609bef7d23641911d24e81df6cd5a 2024-11-28T00:42:39,646 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411288feef391adc24194a0b7486ea00ea2bd_39b7538db3356e20c5d1405f3b3acf6f 2024-11-28T00:42:39,647 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-28T00:42:39,649 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,652 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-28T00:42:39,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,715 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:42:39,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:42:39,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:42:39,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:42:39,719 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemState' descriptor. 2024-11-28T00:42:39,721 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,721 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-28T00:42:39,722 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754559721"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:39,722 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754559721"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:39,724 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,724 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:39,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:39,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:39,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:42:39,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:39,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-28T00:42:39,727 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:42:39,727 DEBUG [PEWorker-2 {}] 
assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 39b7538db3356e20c5d1405f3b3acf6f, NAME => 'testtb-testExportFileSystemState,,1732754535757.39b7538db3356e20c5d1405f3b3acf6f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e95609bef7d23641911d24e81df6cd5a, NAME => 'testtb-testExportFileSystemState,1,1732754535757.e95609bef7d23641911d24e81df6cd5a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:42:39,728 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-28T00:42:39,728 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754559728"}]},"ts":"9223372036854775807"} 2024-11-28T00:42:39,735 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-28T00:42:39,737 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:42:39,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 137 msec 2024-11-28T00:42:39,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-28T00:42:39,838 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-28T00:42:39,838 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:42:39,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-28T00:42:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-28T00:42:39,853 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-28T00:42:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-28T00:42:39,892 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=807 (was 796) Potentially hanging thread: Thread-3893 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_959099159_1 at /127.0.0.1:36722 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:57238 [Waiting for operation #3] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:36738 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:40799 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:38538 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_959099159_1 at /127.0.0.1:57228 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40799 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 132578) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=819 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=797 (was 670) - SystemLoadAverage LEAK? -, ProcessCount=27 (was 23) - ProcessCount LEAK? 
-, AvailableMemoryMB=2372 (was 2790) 2024-11-28T00:42:39,893 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-28T00:42:39,922 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=807, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=797, ProcessCount=27, AvailableMemoryMB=2369 2024-11-28T00:42:39,922 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-28T00:42:39,924 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:42:39,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:42:39,928 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:42:39,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-11-28T00:42:39,932 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:42:39,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:42:39,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:42:39,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:42:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:42:39,967 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ec8c4d34a22f0b50125ac1d8afb59f63, NAME => 'testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:39,967 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7ee859a4f3f2733086f0f36798e3e970, NAME => 'testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:42:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:42:40,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:42:40,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:42:40,068 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:40,068 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing ec8c4d34a22f0b50125ac1d8afb59f63, disabling compactions & flushes 2024-11-28T00:42:40,068 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:40,069 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:40,069 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. after waiting 0 ms 2024-11-28T00:42:40,069 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:40,069 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
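
The create request and RegionOpenAndInit entries above correspond to a client-side creation of a table with a single MOB-enabled column family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1') and one split key '1', which yields the two regions seen in the log. A minimal sketch of an equivalent call against the standard HBase Admin API is shown below; this is an illustration only (the test itself goes through its own HBaseTestingUtil helpers) and it assumes an hbase-site.xml on the classpath so ConnectionFactory can find the cluster.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch; not the test's actual code path.
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Column family 'cf' with MOB enabled and a threshold of 0 bytes,
      // matching the attributes shown in the create request above.
      ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1);
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(cf.build());
      // One split key '1' produces the two regions ('' -> '1') and ('1' -> '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}

createTable here blocks until the CreateTableProcedure (pid=114 in this run) completes, which is why the log shows the client repeatedly "Checking to see if procedure is done pid=114" while the regions are written out and assigned.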
2024-11-28T00:42:40,069 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for ec8c4d34a22f0b50125ac1d8afb59f63: Waiting for close lock at 1732754560068Disabling compacts and flushes for region at 1732754560068Disabling writes for close at 1732754560069 (+1 ms)Writing region close event to WAL at 1732754560069Closed at 1732754560069 2024-11-28T00:42:40,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:42:40,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:42:40,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:42:40,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:40,081 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 7ee859a4f3f2733086f0f36798e3e970, disabling compactions & flushes 2024-11-28T00:42:40,081 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,081 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,081 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. after waiting 0 ms 2024-11-28T00:42:40,081 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,081 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 
2024-11-28T00:42:40,081 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7ee859a4f3f2733086f0f36798e3e970: Waiting for close lock at 1732754560081Disabling compacts and flushes for region at 1732754560081Disabling writes for close at 1732754560081Writing region close event to WAL at 1732754560081Closed at 1732754560081 2024-11-28T00:42:40,083 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:42:40,084 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754560083"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754560083"}]},"ts":"1732754560083"} 2024-11-28T00:42:40,084 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754560083"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754560083"}]},"ts":"1732754560083"} 2024-11-28T00:42:40,087 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:42:40,088 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:42:40,089 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754560088"}]},"ts":"1732754560088"} 2024-11-28T00:42:40,095 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-28T00:42:40,096 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:42:40,098 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:42:40,098 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:42:40,098 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:42:40,098 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:42:40,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, ASSIGN}] 2024-11-28T00:42:40,100 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, ASSIGN 2024-11-28T00:42:40,100 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, ASSIGN 2024-11-28T00:42:40,101 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:42:40,101 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:42:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:42:40,252 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:42:40,252 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=7ee859a4f3f2733086f0f36798e3e970, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:40,253 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ec8c4d34a22f0b50125ac1d8afb59f63, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:42:40,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, ASSIGN because future has completed 2024-11-28T00:42:40,255 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:42:40,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, ASSIGN because future has completed 2024-11-28T00:42:40,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:42:40,411 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,411 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 7ee859a4f3f2733086f0f36798e3e970, NAME => 'testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:42:40,411 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. service=AccessControlService 2024-11-28T00:42:40,411 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:42:40,413 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,413 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:40,413 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,413 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,422 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:40,423 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => ec8c4d34a22f0b50125ac1d8afb59f63, NAME => 'testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:42:40,423 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. service=AccessControlService 2024-11-28T00:42:40,423 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:42:40,423 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,423 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:42:40,423 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,424 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,436 INFO [StoreOpener-7ee859a4f3f2733086f0f36798e3e970-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,436 INFO [StoreOpener-ec8c4d34a22f0b50125ac1d8afb59f63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,438 INFO [StoreOpener-7ee859a4f3f2733086f0f36798e3e970-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7ee859a4f3f2733086f0f36798e3e970 columnFamilyName cf 2024-11-28T00:42:40,440 DEBUG [StoreOpener-7ee859a4f3f2733086f0f36798e3e970-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:40,441 INFO [StoreOpener-7ee859a4f3f2733086f0f36798e3e970-1 {}] regionserver.HStore(327): Store=7ee859a4f3f2733086f0f36798e3e970/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:42:40,441 INFO [StoreOpener-ec8c4d34a22f0b50125ac1d8afb59f63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec8c4d34a22f0b50125ac1d8afb59f63 columnFamilyName cf 2024-11-28T00:42:40,441 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,442 DEBUG [StoreOpener-ec8c4d34a22f0b50125ac1d8afb59f63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:40,442 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,443 INFO [StoreOpener-ec8c4d34a22f0b50125ac1d8afb59f63-1 {}] regionserver.HStore(327): Store=ec8c4d34a22f0b50125ac1d8afb59f63/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:42:40,443 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,443 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,443 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,443 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,444 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,444 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,445 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,445 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,446 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] 
regionserver.HRegion(1093): writing seq id for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,448 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,461 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:42:40,461 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 7ee859a4f3f2733086f0f36798e3e970; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62887008, jitterRate=-0.06291055679321289}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:42:40,462 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:40,462 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 7ee859a4f3f2733086f0f36798e3e970: Running coprocessor pre-open hook at 1732754560413Writing region info on filesystem at 1732754560413Initializing all the Stores at 1732754560421 (+8 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754560421Cleaning up temporary data from old regions at 1732754560443 (+22 ms)Running coprocessor post-open hooks at 1732754560462 (+19 ms)Region opened successfully at 1732754560462 2024-11-28T00:42:40,463 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:42:40,464 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970., pid=117, masterSystemTime=1732754560407 2024-11-28T00:42:40,464 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened ec8c4d34a22f0b50125ac1d8afb59f63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66317016, jitterRate=-0.011799454689025879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:42:40,464 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:40,464 
DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for ec8c4d34a22f0b50125ac1d8afb59f63: Running coprocessor pre-open hook at 1732754560424Writing region info on filesystem at 1732754560424Initializing all the Stores at 1732754560425 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754560425Cleaning up temporary data from old regions at 1732754560445 (+20 ms)Running coprocessor post-open hooks at 1732754560464 (+19 ms)Region opened successfully at 1732754560464 2024-11-28T00:42:40,467 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63., pid=118, masterSystemTime=1732754560411 2024-11-28T00:42:40,467 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,467 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:40,469 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=7ee859a4f3f2733086f0f36798e3e970, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:42:40,473 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:40,473 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
2024-11-28T00:42:40,473 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ec8c4d34a22f0b50125ac1d8afb59f63, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:42:40,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:42:40,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:42:40,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-11-28T00:42:40,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084 in 221 msec 2024-11-28T00:42:40,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-11-28T00:42:40,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938 in 224 msec 2024-11-28T00:42:40,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, ASSIGN in 383 msec 2024-11-28T00:42:40,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-11-28T00:42:40,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, ASSIGN in 386 msec 2024-11-28T00:42:40,492 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:42:40,492 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754560492"}]},"ts":"1732754560492"} 2024-11-28T00:42:40,495 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-28T00:42:40,498 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:42:40,498 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-28T00:42:40,508 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 
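
The PermissionStorage entries above show the AccessController coprocessor writing the table owner's grant (jenkins: RWXCA) into hbase:acl during CREATE_TABLE_POST_OPERATION. As a hedged illustration, those entries could be read back from an ordinary client with AccessControlClient; the grant itself is made automatically by the coprocessor, not by code like this.

import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadAclSketch {
  public static void main(String[] args) throws Throwable {
    // Illustrative sketch; assumes the AccessController coprocessor is enabled
    // on the cluster, as it is in this secure-export test.
    try (Connection conn = ConnectionFactory.createConnection()) {
      // List ACL entries for the table; expected to include "jenkins: RWXCA"
      // as written by the AccessController on table creation.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
      perms.forEach(p -> System.out.println(p));
    }
  }
}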
2024-11-28T00:42:40,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:42:40,577 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:40,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:40,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:40,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:42:40,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:40,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:40,589 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:40,589 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:42:40,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 666 msec 2024-11-28T00:42:41,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:42:41,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:42:41,068 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,071 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-28T00:42:41,071 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
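
The "Found 2 regions for table testtb-testConsecutiveExports" line above comes from the test utility's meta scan; the same count can be reproduced from a plain client. A minimal sketch using Admin.getRegions follows, under the same assumptions as the earlier sketch (illustrative only).

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Expect two regions, split at '1', matching the log above.
      List<RegionInfo> regions =
          admin.getRegions(TableName.valueOf("testtb-testConsecutiveExports"));
      System.out.println("region count: " + regions.size());
      regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
    }
  }
}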
2024-11-28T00:42:41,071 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:42:41,073 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,079 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,086 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:42:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754561090 (current time:1732754561090). 2024-11-28T00:42:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:42:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-28T00:42:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:42:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff6db20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:41,092 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:41,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:41,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:41,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68365cc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:42:41,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:41,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:41,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,094 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36704, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:41,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cab939a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:41,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:41,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:41,098 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46186, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:41,100 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,101 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a3d85ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:41,110 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:41,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:41,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:41,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f95aab9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:41,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:41,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,112 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:41,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26afce0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:41,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:41,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:41,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:41,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:42:41,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,123 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-28T00:42:41,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:42:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:42:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-28T00:42:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:42:41,127 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:42:41,128 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:42:41,130 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:42:41,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:42:41,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:42:41,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:42:41,160 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:42:41,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970}] 2024-11-28T00:42:41,162 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:41,163 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:41,238 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:42:41,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-11-28T00:42:41,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-11-28T00:42:41,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:41,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 7ee859a4f3f2733086f0f36798e3e970: 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for ec8c4d34a22f0b50125ac1d8afb59f63: 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. for emptySnaptb0-testConsecutiveExports completed. 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. for emptySnaptb0-testConsecutiveExports completed. 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:42:41,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:42:41,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:42:41,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:42:41,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:42:41,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:41,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-11-28T00:42:41,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-11-28T00:42:41,447 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:41,447 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:41,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:42:41,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 in 289 msec 2024-11-28T00:42:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:42:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:42:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:42:41,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:41,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-28T00:42:41,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-11-28T00:42:41,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:41,465 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:41,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-11-28T00:42:41,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 in 306 msec 2024-11-28T00:42:41,470 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:42:41,471 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:42:41,496 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:42:41,496 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:42:41,496 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:41,497 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:42:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:42:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:42:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:42:41,532 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:42:41,533 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-28T00:42:41,534 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-28T00:42:41,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:42:41,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:42:41,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:42:41,563 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:42:41,582 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:42:41,583 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-28T00:42:41,585 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:42:41,585 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-28T00:42:41,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 461 msec 2024-11-28T00:42:41,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:42:41,758 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:42:41,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:42:41,770 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:42:41,772 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,776 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-28T00:42:41,776 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
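Annotation: the "Client=jenkins//172.17.0.3 snapshot request" and "Operation: SNAPSHOT ... completed" entries around this point correspond to a client-side call into the HBase Admin API. The following is only a minimal sketch of how such a FLUSH-type snapshot is typically requested, reusing the snapshot and table names from this log and assuming an hbase-site.xml reachable on the classpath; it is not the test's actual code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.SnapshotType;

  public class TakeSnapshotSketch {
    public static void main(String[] args) throws Exception {
      // Assumes cluster connection settings come from hbase-site.xml on the classpath.
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // FLUSH-type snapshot, matching "type=FLUSH" in the SnapshotProcedure entries above.
        // This blocks until the master-side SnapshotProcedure (SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION) finishes.
        admin.snapshot("emptySnaptb0-testConsecutiveExports",
            TableName.valueOf("testtb-testConsecutiveExports"),
            SnapshotType.FLUSH);
      }
    }
  }

The second snapshot request that follows in the log (snaptb0-testConsecutiveExports) would be the same call with the other snapshot name.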
2024-11-28T00:42:41,776 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:42:41,778 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,784 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,794 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:42:41,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:42:41,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754561797 (current time:1732754561797). 2024-11-28T00:42:41,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:42:41,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-28T00:42:41,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:42:41,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41ede76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:41,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:41,799 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:41,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:41,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:41,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@676babfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:42:41,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:41,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:41,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,800 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:41,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11e64955, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:41,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:41,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:41,804 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46196, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:41,805 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,805 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:41,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0d902f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:42:41,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:42:41,807 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:42:41,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:42:41,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:42:41,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d44a4b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:42:41,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:42:41,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,809 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:42:41,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bfbe9a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:42:41,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:42:41,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:42:41,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:42:41,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46210, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:42:41,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:42:41,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:42:41,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:42:41,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:42:41,818 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:42:41,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-28T00:42:41,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:42:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:42:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-28T00:42:41,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:42:41,821 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:42:41,822 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:42:41,825 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:42:41,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:42:41,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:42:41,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:42:41,837 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:42:41,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970}] 2024-11-28T00:42:41,838 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:41,838 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:41,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:42:41,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-11-28T00:42:41,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-11-28T00:42:41,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:42:41,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:42:41,990 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 7ee859a4f3f2733086f0f36798e3e970 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:42:41,990 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing ec8c4d34a22f0b50125ac1d8afb59f63 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:42:42,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 is 71, key is 16e68cc9ed7b777fce123b2cae08c620/cf:q/1732754561770/Put/seqid=0 2024-11-28T00:42:42,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 is 71, key is 0b1e011ffe3602e08d225752ad1aad00/cf:q/1732754561767/Put/seqid=0 2024-11-28T00:42:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742102_1278 (size=8241) 2024-11-28T00:42:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742102_1278 (size=8241) 2024-11-28T00:42:42,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742102_1278 (size=8241) 2024-11-28T00:42:42,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:42,036 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:42,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/.tmp/cf/9e236bc1f99947e4a1896f586292d7fd, store: [table=testtb-testConsecutiveExports family=cf region=7ee859a4f3f2733086f0f36798e3e970] 2024-11-28T00:42:42,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/.tmp/cf/9e236bc1f99947e4a1896f586292d7fd is 206, key is 18d66e001c49820a8caaeeb63f4f5882d/cf:q/1732754561770/Put/seqid=0 2024-11-28T00:42:42,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742103_1279 (size=5032) 2024-11-28T00:42:42,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742103_1279 (size=5032) 2024-11-28T00:42:42,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742103_1279 (size=5032) 2024-11-28T00:42:42,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:42,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:42,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/.tmp/cf/856052d882a44065acb8b40bfa862f62, store: [table=testtb-testConsecutiveExports family=cf region=ec8c4d34a22f0b50125ac1d8afb59f63] 2024-11-28T00:42:42,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/.tmp/cf/856052d882a44065acb8b40bfa862f62 is 206, key is 0bf695f28ded30ea7c1b6ac7c58b83e87/cf:q/1732754561767/Put/seqid=0 2024-11-28T00:42:42,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742104_1280 (size=15055) 2024-11-28T00:42:42,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742104_1280 (size=15055) 2024-11-28T00:42:42,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742104_1280 (size=15055) 2024-11-28T00:42:42,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/.tmp/cf/9e236bc1f99947e4a1896f586292d7fd 2024-11-28T00:42:42,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/.tmp/cf/9e236bc1f99947e4a1896f586292d7fd as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd 2024-11-28T00:42:42,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd, entries=48, sequenceid=6, filesize=14.7 K 2024-11-28T00:42:42,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7ee859a4f3f2733086f0f36798e3e970 in 81ms, sequenceid=6, compaction requested=false 2024-11-28T00:42:42,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 7ee859a4f3f2733086f0f36798e3e970: 2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. for snaptb0-testConsecutiveExports completed. 
2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd] hfiles 2024-11-28T00:42:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:42:42,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742105_1281 (size=5700) 2024-11-28T00:42:42,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742105_1281 (size=5700) 2024-11-28T00:42:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742105_1281 (size=5700) 2024-11-28T00:42:42,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/.tmp/cf/856052d882a44065acb8b40bfa862f62 2024-11-28T00:42:42,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/.tmp/cf/856052d882a44065acb8b40bfa862f62 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62 2024-11-28T00:42:42,097 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:42:42,098 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for ec8c4d34a22f0b50125ac1d8afb59f63 in 108ms, 
sequenceid=6, compaction requested=false 2024-11-28T00:42:42,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for ec8c4d34a22f0b50125ac1d8afb59f63: 2024-11-28T00:42:42,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. for snaptb0-testConsecutiveExports completed. 2024-11-28T00:42:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:42:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:42:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62] hfiles 2024-11-28T00:42:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62 for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:42:42,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:42:42,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:42:42,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:42:42,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 
2024-11-28T00:42:42,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-11-28T00:42:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-11-28T00:42:42,102 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:42,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:42,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7ee859a4f3f2733086f0f36798e3e970 in 266 msec 2024-11-28T00:42:42,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:42:42,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:42:42,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:42:42,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
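The entries above trace the two SnapshotRegionProcedure subprocedures (pid=123 and pid=124) for snaptb0-testConsecutiveExports: each region is flushed, the flushed hfile is committed, a per-region manifest records the region info and hfile references, and the region server reports completion back to the master. For orientation only, a minimal sketch of how a client requests this kind of FLUSH snapshot through the public Admin API follows; the table and snapshot names are taken from this log, while the connection boilerplate is an assumption rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Default snapshot type is FLUSH: regions are flushed first (the memstore
          // flush and hfile commit seen above), then referenced in the manifest.
          admin.snapshot("snaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }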
2024-11-28T00:42:42,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-28T00:42:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-11-28T00:42:42,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:42,117 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:42,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-11-28T00:42:42,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63 in 280 msec 2024-11-28T00:42:42,119 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:42:42,124 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:42:42,125 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
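The MobRegionSnapshotPool entries here and in the next records handle the table's MOB region: the store files under mobdir/.../c7b48a2e62736c517f8bfa7d64fc37ac are added to the snapshot manifest alongside the regular region hfiles. They appear because the cf family of the test table is MOB-enabled. As a hedged illustration only, a family with that behaviour is typically declared as below; the threshold value is illustrative and not something this log states.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      static TableDescriptor mobEnabledTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // cells above the threshold are written to the MOB area (mobdir)
                .setMobThreshold(0L)   // illustrative value; forces every cell into MOB files
                .build())
            .build();
      }
    }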
2024-11-28T00:42:42,125 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:42:42,126 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:42:42,127 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63] hfiles 2024-11-28T00:42:42,127 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:42:42,127 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:42:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:42:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:42:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:42:42,134 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:42:42,134 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-28T00:42:42,135 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:42:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:42:42,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:42:42,147 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:42:42,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:42:42,149 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:42:42,155 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:42:42,155 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:42:42,157 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:42:42,157 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-28T00:42:42,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 338 msec 2024-11-28T00:42:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:42:42,448 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:42:42,449 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448 2024-11-28T00:42:42,449 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:42,480 DEBUG [Time-limited 
test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:42:42,480 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@17a4db9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:42:42,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:42:42,485 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:42:42,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:42,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:42,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-15779506013212801928.jar 2024-11-28T00:42:43,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-17449892017849646150.jar 2024-11-28T00:42:43,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:42:43,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:42:43,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:42:43,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 
2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:42:43,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:42:43,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:42:43,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:42:43,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:43,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:43,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:43,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:43,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:42:43,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:43,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:42:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742110_1286 (size=24020) 2024-11-28T00:42:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742110_1286 (size=24020) 2024-11-28T00:42:43,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742110_1286 (size=24020) 2024-11-28T00:42:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742111_1287 (size=77755) 2024-11-28T00:42:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742111_1287 (size=77755) 2024-11-28T00:42:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742111_1287 (size=77755) 2024-11-28T00:42:43,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742112_1288 (size=131360) 2024-11-28T00:42:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742112_1288 (size=131360) 2024-11-28T00:42:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742112_1288 (size=131360) 2024-11-28T00:42:43,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742113_1289 (size=111793) 2024-11-28T00:42:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742113_1289 (size=111793) 2024-11-28T00:42:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742113_1289 (size=111793) 2024-11-28T00:42:43,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742114_1290 (size=1832290) 2024-11-28T00:42:43,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742114_1290 (size=1832290) 2024-11-28T00:42:43,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742114_1290 (size=1832290) 2024-11-28T00:42:43,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742115_1291 (size=8360005) 2024-11-28T00:42:43,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742115_1291 (size=8360005) 2024-11-28T00:42:43,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742115_1291 (size=8360005) 2024-11-28T00:42:43,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742116_1292 (size=503880) 
2024-11-28T00:42:43,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742116_1292 (size=503880) 2024-11-28T00:42:43,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742116_1292 (size=503880) 2024-11-28T00:42:43,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742117_1293 (size=322274) 2024-11-28T00:42:43,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742117_1293 (size=322274) 2024-11-28T00:42:43,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742117_1293 (size=322274) 2024-11-28T00:42:43,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742118_1294 (size=20406) 2024-11-28T00:42:43,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742118_1294 (size=20406) 2024-11-28T00:42:43,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742118_1294 (size=20406) 2024-11-28T00:42:43,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742119_1295 (size=45609) 2024-11-28T00:42:43,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742119_1295 (size=45609) 2024-11-28T00:42:43,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742119_1295 (size=45609) 2024-11-28T00:42:43,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742120_1296 (size=136454) 2024-11-28T00:42:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742120_1296 (size=136454) 2024-11-28T00:42:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742120_1296 (size=136454) 2024-11-28T00:42:43,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742121_1297 (size=1597136) 2024-11-28T00:42:43,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742121_1297 (size=1597136) 2024-11-28T00:42:43,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742121_1297 (size=1597136) 2024-11-28T00:42:43,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742122_1298 (size=30873) 2024-11-28T00:42:43,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742122_1298 (size=30873) 2024-11-28T00:42:43,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742122_1298 (size=30873) 
2024-11-28T00:42:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742123_1299 (size=29229) 2024-11-28T00:42:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742123_1299 (size=29229) 2024-11-28T00:42:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742123_1299 (size=29229) 2024-11-28T00:42:43,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742124_1300 (size=903861) 2024-11-28T00:42:43,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742124_1300 (size=903861) 2024-11-28T00:42:43,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742124_1300 (size=903861) 2024-11-28T00:42:43,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:42:43,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:42:43,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:42:43,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742126_1302 (size=232881) 2024-11-28T00:42:43,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742126_1302 (size=232881) 2024-11-28T00:42:43,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742126_1302 (size=232881) 2024-11-28T00:42:43,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742127_1303 (size=1323991) 2024-11-28T00:42:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742127_1303 (size=1323991) 2024-11-28T00:42:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742127_1303 (size=1323991) 2024-11-28T00:42:43,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742128_1304 (size=4695811) 2024-11-28T00:42:43,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742128_1304 (size=4695811) 2024-11-28T00:42:43,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742128_1304 (size=4695811) 2024-11-28T00:42:43,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742129_1305 (size=1877034) 2024-11-28T00:42:43,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742129_1305 
(size=1877034) 2024-11-28T00:42:43,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742129_1305 (size=1877034) 2024-11-28T00:42:43,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742130_1306 (size=440956) 2024-11-28T00:42:43,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742130_1306 (size=440956) 2024-11-28T00:42:43,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742130_1306 (size=440956) 2024-11-28T00:42:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742131_1307 (size=217555) 2024-11-28T00:42:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742131_1307 (size=217555) 2024-11-28T00:42:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742131_1307 (size=217555) 2024-11-28T00:42:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742132_1308 (size=6424743) 2024-11-28T00:42:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742132_1308 (size=6424743) 2024-11-28T00:42:43,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742132_1308 (size=6424743) 2024-11-28T00:42:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742133_1309 (size=4188619) 2024-11-28T00:42:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742133_1309 (size=4188619) 2024-11-28T00:42:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742133_1309 (size=4188619) 2024-11-28T00:42:43,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742134_1310 (size=127628) 2024-11-28T00:42:43,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742134_1310 (size=127628) 2024-11-28T00:42:43,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742134_1310 (size=127628) 2024-11-28T00:42:43,750 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
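The run of TableMapReduceUtil(972) entries above is the export job resolving, for each class it needs at runtime, the jar that provides it, so those jars can be shipped with the MapReduce job; the 'No job jar file set' warning is expected when the job is driven from test code rather than a packaged jar. A minimal sketch of the public call that performs this resolution, assuming an already-created Job, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static Job newExportStyleJob() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Finds the jar containing each of the job's key/value/format classes and the
        // HBase client classes and adds them to the job's classpath, which is what
        // produces the "For class X, using jar Y" entries above.
        TableMapReduceUtil.addDependencyJars(job);
        return job;
      }
    }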
2024-11-28T00:42:43,752 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-28T00:42:43,754 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:42:43,755 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:42:43,755 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:42:43,755 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:42:43,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:42:43,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:42:43,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:42:43,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:42:43,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:42:43,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:42:43,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742137_1313 (size=304128) 2024-11-28T00:42:43,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742137_1313 (size=304128) 2024-11-28T00:42:43,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742137_1313 (size=304128) 2024-11-28T00:42:44,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:42:44,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:42:44,266 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0004_000001 (auth:SIMPLE) from 127.0.0.1:48846 2024-11-28T00:42:44,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000001/launch_container.sh] 2024-11-28T00:42:44,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000001/container_tokens] 2024-11-28T00:42:44,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0004/container_1732754375741_0004_01_000001/sysfs] 2024-11-28T00:42:45,042 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:42:45,119 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:36234 2024-11-28T00:42:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-28T00:42:48,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-28T00:42:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-28T00:42:50,243 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:49968 2024-11-28T00:42:50,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742138_1314 (size=349826) 2024-11-28T00:42:50,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742138_1314 (size=349826) 2024-11-28T00:42:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742138_1314 (size=349826) 2024-11-28T00:42:52,494 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:37012 2024-11-28T00:42:52,495 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:53006 2024-11-28T00:42:53,326 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:37028 2024-11-28T00:42:53,331 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:53022 2024-11-28T00:42:53,945 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:42:56,268 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0005_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:42:56,880 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:43:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742139_1315 (size=31814) 2024-11-28T00:43:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742139_1315 (size=31814) 2024-11-28T00:43:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742139_1315 (size=31814) 2024-11-28T00:43:00,969 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000005/launch_container.sh] 2024-11-28T00:43:00,969 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000005/container_tokens] 2024-11-28T00:43:00,969 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000005/sysfs] 2024-11-28T00:43:00,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:43:00,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:43:00,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:43:01,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000003/launch_container.sh] 2024-11-28T00:43:01,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000003/container_tokens] 2024-11-28T00:43:01,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000003/sysfs] 2024-11-28T00:43:01,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742141_1317 (size=31814) 2024-11-28T00:43:01,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742141_1317 (size=31814) 2024-11-28T00:43:01,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742141_1317 (size=31814) 2024-11-28T00:43:01,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742142_1318 (size=349826) 2024-11-28T00:43:01,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742142_1318 (size=349826) 2024-11-28T00:43:01,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742142_1318 (size=349826) 2024-11-28T00:43:01,128 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:37042 2024-11-28T00:43:01,139 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:53026 2024-11-28T00:43:01,154 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:53042 2024-11-28T00:43:03,006 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:43:03,006 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
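The Time-limited test entries in this stretch are the first export completing: the snapshot manifest was copied to the local target, the hfile list was split into four groups, the MapReduce copy job ran on the MiniMRCluster, and the result is finalized and verified before the test immediately starts a second export to the same destination. Outside this harness the same operation is normally driven through the ExportSnapshot tool; a sketch follows, assuming it is run as a standard Hadoop Tool, with the snapshot name taken from this log and a placeholder destination standing in for the local-export-... directory.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // --snapshot and --copy-to are the documented ExportSnapshot options; the
        // file:/// destination below is a placeholder, not the test's actual path.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testConsecutiveExports",
            "--copy-to", "file:///tmp/local-export"
        });
        System.exit(rc);
      }
    }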
2024-11-28T00:43:03,010 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-28T00:43:03,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:43:03,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:43:03,010 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:43:03,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:43:03,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:43:03,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@17a4db9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:43:03,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:43:03,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:43:03,014 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:03,057 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:03,057 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@17a4db9, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:43:03,060 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:43:03,099 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:43:03,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:03,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:03,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:03,702 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000002/launch_container.sh] 2024-11-28T00:43:03,702 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000002/container_tokens] 2024-11-28T00:43:03,702 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000002/sysfs] 2024-11-28T00:43:04,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-9150660744766387024.jar 2024-11-28T00:43:04,200 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-11185469290868102961.jar 2024-11-28T00:43:04,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:43:04,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:43:04,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:04,262 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:04,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:04,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:04,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:04,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742143_1319 (size=24020) 2024-11-28T00:43:04,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742143_1319 (size=24020) 2024-11-28T00:43:04,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742143_1319 (size=24020) 2024-11-28T00:43:04,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742144_1320 (size=77755) 2024-11-28T00:43:04,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742144_1320 (size=77755) 2024-11-28T00:43:04,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742144_1320 (size=77755) 2024-11-28T00:43:04,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742145_1321 (size=131360) 2024-11-28T00:43:04,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742145_1321 (size=131360) 2024-11-28T00:43:04,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742145_1321 (size=131360) 2024-11-28T00:43:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742146_1322 (size=111793) 2024-11-28T00:43:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742146_1322 (size=111793) 2024-11-28T00:43:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742146_1322 (size=111793) 2024-11-28T00:43:04,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742147_1323 (size=1832290) 2024-11-28T00:43:04,340 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742147_1323 (size=1832290) 2024-11-28T00:43:04,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742147_1323 (size=1832290) 2024-11-28T00:43:04,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742148_1324 (size=8360005) 2024-11-28T00:43:04,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742148_1324 (size=8360005) 2024-11-28T00:43:04,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742148_1324 (size=8360005) 2024-11-28T00:43:04,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742149_1325 (size=440956) 2024-11-28T00:43:04,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742149_1325 (size=440956) 2024-11-28T00:43:04,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742149_1325 (size=440956) 2024-11-28T00:43:04,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742150_1326 (size=503880) 2024-11-28T00:43:04,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742150_1326 (size=503880) 2024-11-28T00:43:04,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742150_1326 (size=503880) 2024-11-28T00:43:04,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742151_1327 (size=322274) 2024-11-28T00:43:04,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742151_1327 (size=322274) 2024-11-28T00:43:04,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742151_1327 (size=322274) 2024-11-28T00:43:04,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742152_1328 (size=20406) 2024-11-28T00:43:04,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742152_1328 (size=20406) 2024-11-28T00:43:04,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742152_1328 (size=20406) 2024-11-28T00:43:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742153_1329 (size=45609) 2024-11-28T00:43:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742153_1329 (size=45609) 2024-11-28T00:43:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742153_1329 (size=45609) 2024-11-28T00:43:04,400 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742154_1330 (size=136454) 2024-11-28T00:43:04,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742154_1330 (size=136454) 2024-11-28T00:43:04,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742154_1330 (size=136454) 2024-11-28T00:43:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742155_1331 (size=1597136) 2024-11-28T00:43:04,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742155_1331 (size=1597136) 2024-11-28T00:43:04,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742155_1331 (size=1597136) 2024-11-28T00:43:04,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742156_1332 (size=30873) 2024-11-28T00:43:04,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742156_1332 (size=30873) 2024-11-28T00:43:04,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742156_1332 (size=30873) 2024-11-28T00:43:04,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742157_1333 (size=29229) 2024-11-28T00:43:04,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742157_1333 (size=29229) 2024-11-28T00:43:04,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742157_1333 (size=29229) 2024-11-28T00:43:04,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742158_1334 (size=903861) 2024-11-28T00:43:04,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742158_1334 (size=903861) 2024-11-28T00:43:04,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742158_1334 (size=903861) 2024-11-28T00:43:04,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742159_1335 (size=5175431) 2024-11-28T00:43:04,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742159_1335 (size=5175431) 2024-11-28T00:43:04,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742159_1335 (size=5175431) 2024-11-28T00:43:04,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742160_1336 (size=232881) 2024-11-28T00:43:04,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742160_1336 (size=232881) 2024-11-28T00:43:04,450 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742160_1336 (size=232881) 2024-11-28T00:43:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742161_1337 (size=1323991) 2024-11-28T00:43:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742161_1337 (size=1323991) 2024-11-28T00:43:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742161_1337 (size=1323991) 2024-11-28T00:43:04,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742162_1338 (size=4695811) 2024-11-28T00:43:04,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742162_1338 (size=4695811) 2024-11-28T00:43:04,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742162_1338 (size=4695811) 2024-11-28T00:43:04,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742163_1339 (size=1877034) 2024-11-28T00:43:04,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742163_1339 (size=1877034) 2024-11-28T00:43:04,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742163_1339 (size=1877034) 2024-11-28T00:43:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742164_1340 (size=217555) 2024-11-28T00:43:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742164_1340 (size=217555) 2024-11-28T00:43:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742164_1340 (size=217555) 2024-11-28T00:43:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742165_1341 (size=6424743) 2024-11-28T00:43:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742165_1341 (size=6424743) 2024-11-28T00:43:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742165_1341 (size=6424743) 2024-11-28T00:43:04,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742166_1342 (size=4188619) 2024-11-28T00:43:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742166_1342 (size=4188619) 2024-11-28T00:43:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742166_1342 (size=4188619) 2024-11-28T00:43:04,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742167_1343 (size=127628) 
2024-11-28T00:43:04,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742167_1343 (size=127628) 2024-11-28T00:43:04,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742167_1343 (size=127628) 2024-11-28T00:43:04,525 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:43:04,527 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-28T00:43:04,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:43:04,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:43:04,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:43:04,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:43:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:43:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:43:04,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:43:04,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:43:04,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:43:04,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:43:04,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742170_1346 (size=304126) 2024-11-28T00:43:04,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742170_1346 (size=304126) 2024-11-28T00:43:04,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742170_1346 (size=304126) 2024-11-28T00:43:05,723 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000004/launch_container.sh] 2024-11-28T00:43:05,723 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000004/container_tokens] 
2024-11-28T00:43:05,723 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000004/sysfs] 2024-11-28T00:43:07,227 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:43:07,227 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:43:07,229 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0005_000001 (auth:SIMPLE) from 127.0.0.1:46552 2024-11-28T00:43:08,045 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:42924 2024-11-28T00:43:12,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000001/launch_container.sh] 2024-11-28T00:43:12,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000001/container_tokens] 2024-11-28T00:43:12,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0005/container_1732754375741_0005_01_000001/sysfs] 2024-11-28T00:43:12,588 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:58612 2024-11-28T00:43:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742171_1347 (size=349824) 2024-11-28T00:43:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742171_1347 (size=349824) 2024-11-28T00:43:12,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742171_1347 (size=349824) 2024-11-28T00:43:14,862 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:35740 2024-11-28T00:43:14,862 INFO 
[Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:45292 2024-11-28T00:43:15,694 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:35756 2024-11-28T00:43:15,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:45298 2024-11-28T00:43:18,230 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0006_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:43:20,170 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000002/launch_container.sh] 2024-11-28T00:43:20,170 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000002/container_tokens] 2024-11-28T00:43:20,170 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000002/sysfs] 2024-11-28T00:43:22,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000004/launch_container.sh] 2024-11-28T00:43:22,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000004/container_tokens] 2024-11-28T00:43:22,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000004/sysfs] 2024-11-28T00:43:22,932 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000005/launch_container.sh] 2024-11-28T00:43:22,932 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000005/container_tokens] 2024-11-28T00:43:22,932 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000005/sysfs] 2024-11-28T00:43:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742172_1348 (size=29751) 2024-11-28T00:43:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742172_1348 (size=29751) 2024-11-28T00:43:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742172_1348 (size=29751) 2024-11-28T00:43:23,060 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000003/launch_container.sh] 2024-11-28T00:43:23,060 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000003/container_tokens] 2024-11-28T00:43:23,060 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000003/sysfs] 2024-11-28T00:43:23,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:43:23,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:43:23,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:43:23,948 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742174_1350 (size=29751) 2024-11-28T00:43:23,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742174_1350 (size=29751) 2024-11-28T00:43:23,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742174_1350 (size=29751) 2024-11-28T00:43:23,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742175_1351 (size=349824) 2024-11-28T00:43:23,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742175_1351 (size=349824) 2024-11-28T00:43:23,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742175_1351 (size=349824) 2024-11-28T00:43:25,413 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7ee859a4f3f2733086f0f36798e3e970, had cached 0 bytes from a total of 15055 2024-11-28T00:43:25,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec8c4d34a22f0b50125ac1d8afb59f63, had cached 0 bytes from a total of 5700 2024-11-28T00:43:25,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:43:25,911 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:43:25,916 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-28T00:43:25,916 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:43:25,916 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:43:25,917 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:43:25,918 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:43:25,918 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:43:25,918 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@17a4db9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports 
2024-11-28T00:43:25,919 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:43:25,919 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754562448/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:43:25,940 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testConsecutiveExports 2024-11-28T00:43:25,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:25,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:43:25,945 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754605945"}]},"ts":"1732754605945"} 2024-11-28T00:43:25,947 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-28T00:43:25,947 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-28T00:43:25,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-28T00:43:25,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, UNASSIGN}] 2024-11-28T00:43:25,952 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, UNASSIGN 2024-11-28T00:43:25,952 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, UNASSIGN 2024-11-28T00:43:25,953 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=ec8c4d34a22f0b50125ac1d8afb59f63, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:25,953 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=7ee859a4f3f2733086f0f36798e3e970, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 
2024-11-28T00:43:25,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, UNASSIGN because future has completed 2024-11-28T00:43:25,955 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:25,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:43:25,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, UNASSIGN because future has completed 2024-11-28T00:43:25,957 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:25,957 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:43:26,108 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:43:26,108 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:43:26,108 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing ec8c4d34a22f0b50125ac1d8afb59f63, disabling compactions & flushes 2024-11-28T00:43:26,108 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:43:26,108 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 2024-11-28T00:43:26,108 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. after waiting 0 ms 2024-11-28T00:43:26,108 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
2024-11-28T00:43:26,109 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:43:26,110 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:43:26,110 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 7ee859a4f3f2733086f0f36798e3e970, disabling compactions & flushes 2024-11-28T00:43:26,110 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:43:26,110 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:43:26,110 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. after waiting 0 ms 2024-11-28T00:43:26,110 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:43:26,118 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:43:26,118 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:43:26,119 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:26,119 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:26,119 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970. 2024-11-28T00:43:26,119 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63. 
2024-11-28T00:43:26,119 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for ec8c4d34a22f0b50125ac1d8afb59f63: Waiting for close lock at 1732754606108Running coprocessor pre-close hooks at 1732754606108Disabling compacts and flushes for region at 1732754606108Disabling writes for close at 1732754606108Writing region close event to WAL at 1732754606109 (+1 ms)Running coprocessor post-close hooks at 1732754606119 (+10 ms)Closed at 1732754606119 2024-11-28T00:43:26,119 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 7ee859a4f3f2733086f0f36798e3e970: Waiting for close lock at 1732754606110Running coprocessor pre-close hooks at 1732754606110Disabling compacts and flushes for region at 1732754606110Disabling writes for close at 1732754606110Writing region close event to WAL at 1732754606110Running coprocessor post-close hooks at 1732754606119 (+9 ms)Closed at 1732754606119 2024-11-28T00:43:26,120 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:43:26,121 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=ec8c4d34a22f0b50125ac1d8afb59f63, regionState=CLOSED 2024-11-28T00:43:26,121 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:43:26,122 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=7ee859a4f3f2733086f0f36798e3e970, regionState=CLOSED 2024-11-28T00:43:26,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:43:26,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:26,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-28T00:43:26,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-11-28T00:43:26,127 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 7ee859a4f3f2733086f0f36798e3e970, server=97f25515df69,43795,1732754369084 in 168 msec 2024-11-28T00:43:26,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure ec8c4d34a22f0b50125ac1d8afb59f63, server=97f25515df69,38585,1732754368938 in 169 msec 2024-11-28T00:43:26,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ec8c4d34a22f0b50125ac1d8afb59f63, UNASSIGN in 175 msec 2024-11-28T00:43:26,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=128, resume processing ppid=126 2024-11-28T00:43:26,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7ee859a4f3f2733086f0f36798e3e970, UNASSIGN in 176 msec 2024-11-28T00:43:26,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-28T00:43:26,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 181 msec 2024-11-28T00:43:26,132 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754606132"}]},"ts":"1732754606132"} 2024-11-28T00:43:26,133 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-28T00:43:26,133 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-28T00:43:26,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 194 msec 2024-11-28T00:43:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:43:26,258 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:43:26,259 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testConsecutiveExports 2024-11-28T00:43:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-28T00:43:26,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,264 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-28T00:43:26,265 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:43:26,265 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:43:26,267 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/recovered.edits] 2024-11-28T00:43:26,269 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/recovered.edits] 2024-11-28T00:43:26,278 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/cf/9e236bc1f99947e4a1896f586292d7fd 2024-11-28T00:43:26,280 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/cf/856052d882a44065acb8b40bfa862f62 2024-11-28T00:43:26,281 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970/recovered.edits/9.seqid 2024-11-28T00:43:26,281 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:43:26,282 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63/recovered.edits/9.seqid 2024-11-28T00:43:26,283 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testConsecutiveExports/ec8c4d34a22f0b50125ac1d8afb59f63 
2024-11-28T00:43:26,283 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-28T00:43:26,283 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-28T00:43:26,284 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-28T00:43:26,288 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128236039f69e304c38be27125e97c61f87_7ee859a4f3f2733086f0f36798e3e970 2024-11-28T00:43:26,289 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411286cd04730e05f48eabd993c7161096944_ec8c4d34a22f0b50125ac1d8afb59f63 2024-11-28T00:43:26,289 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-28T00:43:26,291 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,294 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-28T00:43:26,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,333 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:43:26,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:43:26,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:43:26,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:43:26,335 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-28T00:43:26,336 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,336 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-28T00:43:26,336 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754606336"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:26,337 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754606336"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:26,339 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:43:26,339 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ec8c4d34a22f0b50125ac1d8afb59f63, NAME => 'testtb-testConsecutiveExports,,1732754559924.ec8c4d34a22f0b50125ac1d8afb59f63.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7ee859a4f3f2733086f0f36798e3e970, NAME => 'testtb-testConsecutiveExports,1,1732754559924.7ee859a4f3f2733086f0f36798e3e970.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:43:26,339 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
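Annotation: the records above are the tail of DeleteTableProcedure pid=131 — regions are archived, their rows are deleted from hbase:meta, the descriptor is removed, and the /hbase/acl/testtb-testConsecutiveExports znode is cleaned up. On the client side such a procedure is normally triggered through the Admin API; a minimal sketch, assuming an already-open Connection named conn (the helper class is hypothetical, not the test's own code):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

final class DeleteTableSketch {
  // Disable and delete a table; the master executes this as a
  // DeleteTableProcedure like pid=131 above.
  static void dropTable(Connection conn, String name) throws IOException {
    TableName tn = TableName.valueOf(name);
    try (Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // deletion requires a disabled table
        }
        admin.deleteTable(tn);      // submits the DeleteTableProcedure on the master
      }
    }
  }
}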
2024-11-28T00:43:26,340 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754606339"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:26,342 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-28T00:43:26,342 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:43:26,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:26,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:26,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-28T00:43:26,344 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,344 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:26,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:43:26,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:26,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 83 msec 2024-11-28T00:43:26,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-28T00:43:26,449 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-28T00:43:26,449 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:43:26,460 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-28T00:43:26,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-28T00:43:26,463 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-28T00:43:26,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-28T00:43:26,486 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=803 (was 807), OpenFileDescriptor=798 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=708 (was 797), ProcessCount=25 (was 27), AvailableMemoryMB=2062 (was 2369) 2024-11-28T00:43:26,486 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-28T00:43:26,505 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=803, OpenFileDescriptor=798, MaxFileDescriptor=1048576, SystemLoadAverage=708, ProcessCount=25, AvailableMemoryMB=2061 2024-11-28T00:43:26,505 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-28T00:43:26,506 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:43:26,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:26,508 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:43:26,508 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-11-28T00:43:26,508 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:43:26,508 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:43:26,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742176_1352 (size=458) 2024-11-28T00:43:26,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742176_1352 (size=458) 2024-11-28T00:43:26,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742176_1352 (size=458) 2024-11-28T00:43:26,517 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ac3c7155f593d6e22f31043f1aa2cc7b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:26,517 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 422ff34824aa47c48e470ec07e329858, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:26,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:43:26,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:43:26,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:43:26,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:43:26,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:26,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:43:26,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing ac3c7155f593d6e22f31043f1aa2cc7b, disabling compactions & flushes 2024-11-28T00:43:26,524 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:26,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:43:26,524 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:26,524 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. after waiting 0 ms 2024-11-28T00:43:26,524 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:26,524 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:26,524 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for ac3c7155f593d6e22f31043f1aa2cc7b: Waiting for close lock at 1732754606523Disabling compacts and flushes for region at 1732754606523Disabling writes for close at 1732754606524 (+1 ms)Writing region close event to WAL at 1732754606524Closed at 1732754606524 2024-11-28T00:43:26,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:43:26,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:43:26,880 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
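Annotation: the CREATE request logged at 00:43:26,506 carries the full descriptor for testtb-testExportFileSystemStateWithMergeRegion — a single family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and one split point at '1', which is why two regions ('' to '1' and '1' to '') are initialized. A rough client-side equivalent using the descriptor builders, assuming an open Connection named conn (a sketch, not the test harness code):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateMobTableSketch {
  static void createTable(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB reference
            .setMaxVersions(1)     // VERSIONS => '1'
            .build());
    byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions ['', '1') and ['1', '')
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(table.build(), splitKeys); // stored as a CreateTableProcedure (pid=132 above)
    }
  }
}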
2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 422ff34824aa47c48e470ec07e329858, disabling compactions & flushes 2024-11-28T00:43:26,924 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. after waiting 0 ms 2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:26,924 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:26,924 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 422ff34824aa47c48e470ec07e329858: Waiting for close lock at 1732754606924Disabling compacts and flushes for region at 1732754606924Disabling writes for close at 1732754606924Writing region close event to WAL at 1732754606924Closed at 1732754606924 2024-11-28T00:43:26,926 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:43:26,926 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732754606926"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754606926"}]},"ts":"1732754606926"} 2024-11-28T00:43:26,926 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732754606926"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754606926"}]},"ts":"1732754606926"} 2024-11-28T00:43:26,928 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-28T00:43:26,929 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:43:26,930 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754606930"}]},"ts":"1732754606930"} 2024-11-28T00:43:26,931 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-28T00:43:26,932 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:43:26,933 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:43:26,933 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:43:26,933 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:43:26,933 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:43:26,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, ASSIGN}] 2024-11-28T00:43:26,934 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, ASSIGN 2024-11-28T00:43:26,934 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, ASSIGN 2024-11-28T00:43:26,935 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 
2024-11-28T00:43:26,935 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:43:27,086 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:43:27,087 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=ac3c7155f593d6e22f31043f1aa2cc7b, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:27,087 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=422ff34824aa47c48e470ec07e329858, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:27,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, ASSIGN because future has completed 2024-11-28T00:43:27,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:27,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, ASSIGN because future has completed 2024-11-28T00:43:27,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:43:27,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:43:27,252 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:27,252 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 422ff34824aa47c48e470ec07e329858, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:43:27,253 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 
service=AccessControlService 2024-11-28T00:43:27,254 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:43:27,254 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,254 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:27,255 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,255 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,255 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,255 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => ac3c7155f593d6e22f31043f1aa2cc7b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:43:27,255 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. service=AccessControlService 2024-11-28T00:43:27,255 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
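Annotation: the "Registered coprocessor service: ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" records show that this secure mini-cluster installs the ACL coprocessor on every opened region. Outside the test harness that is typically wired up through configuration; a hedged sketch of the equivalent programmatic settings, using the standard property names from the HBase security documentation (the helper class is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class AccessControllerConfSketch {
  static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Install the ACL coprocessor on the master, the region servers and every
    // region, which is what makes each opened region register AccessControlService.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}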
2024-11-28T00:43:27,256 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,256 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:27,256 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,256 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,256 INFO [StoreOpener-422ff34824aa47c48e470ec07e329858-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,257 INFO [StoreOpener-ac3c7155f593d6e22f31043f1aa2cc7b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,258 INFO [StoreOpener-422ff34824aa47c48e470ec07e329858-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 422ff34824aa47c48e470ec07e329858 columnFamilyName cf 2024-11-28T00:43:27,258 INFO [StoreOpener-ac3c7155f593d6e22f31043f1aa2cc7b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac3c7155f593d6e22f31043f1aa2cc7b columnFamilyName cf 2024-11-28T00:43:27,259 DEBUG [StoreOpener-422ff34824aa47c48e470ec07e329858-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:27,259 DEBUG [StoreOpener-ac3c7155f593d6e22f31043f1aa2cc7b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:27,259 INFO [StoreOpener-422ff34824aa47c48e470ec07e329858-1 {}] regionserver.HStore(327): Store=422ff34824aa47c48e470ec07e329858/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:27,259 INFO [StoreOpener-ac3c7155f593d6e22f31043f1aa2cc7b-1 {}] regionserver.HStore(327): Store=ac3c7155f593d6e22f31043f1aa2cc7b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:27,260 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,260 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,260 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,260 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,261 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 422ff34824aa47c48e470ec07e329858 
2024-11-28T00:43:27,263 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,263 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,265 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:27,265 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:27,265 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 422ff34824aa47c48e470ec07e329858; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60074691, jitterRate=-0.10481734573841095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:27,265 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,265 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened ac3c7155f593d6e22f31043f1aa2cc7b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68076815, jitterRate=0.014423593878746033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:27,265 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,266 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for ac3c7155f593d6e22f31043f1aa2cc7b: Running coprocessor pre-open hook at 1732754607256Writing region info on filesystem at 1732754607256Initializing all the Stores at 1732754607257 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754607257Cleaning up temporary data from old regions at 1732754607261 (+4 ms)Running coprocessor post-open hooks at 1732754607265 (+4 ms)Region opened successfully at 1732754607266 (+1 ms) 2024-11-28T00:43:27,266 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal 
for 422ff34824aa47c48e470ec07e329858: Running coprocessor pre-open hook at 1732754607255Writing region info on filesystem at 1732754607255Initializing all the Stores at 1732754607256 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754607256Cleaning up temporary data from old regions at 1732754607261 (+5 ms)Running coprocessor post-open hooks at 1732754607265 (+4 ms)Region opened successfully at 1732754607266 (+1 ms) 2024-11-28T00:43:27,267 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858., pid=135, masterSystemTime=1732754607244 2024-11-28T00:43:27,267 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b., pid=136, masterSystemTime=1732754607248 2024-11-28T00:43:27,269 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,269 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,270 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=ac3c7155f593d6e22f31043f1aa2cc7b, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:27,270 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:27,270 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 
2024-11-28T00:43:27,270 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=422ff34824aa47c48e470ec07e329858, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:27,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:43:27,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:27,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-11-28T00:43:27,276 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-11-28T00:43:27,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938 in 180 msec 2024-11-28T00:43:27,276 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084 in 181 msec 2024-11-28T00:43:27,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, ASSIGN in 343 msec 2024-11-28T00:43:27,278 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-11-28T00:43:27,278 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, ASSIGN in 343 msec 2024-11-28T00:43:27,278 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:43:27,279 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754607278"}]},"ts":"1732754607278"} 2024-11-28T00:43:27,280 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-28T00:43:27,280 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:43:27,281 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-28T00:43:27,283 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:43:27,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:27,353 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:27,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:27,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:27,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:27,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:27,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:27,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:27,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 858 msec 2024-11-28T00:43:27,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:43:27,649 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:43:27,649 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:27,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 
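Annotation: once CreateTableProcedure pid=132 finishes, the test scans hbase:meta and reports "Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion". A client can make the same check through the Admin API instead of scanning META directly; a small sketch, assuming an open Connection named conn (hypothetical helper):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

final class ListRegionsSketch {
  static void printRegions(Connection conn, String table) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf(table));
      System.out.println("Found " + regions.size() + " regions for table " + table);
      for (RegionInfo ri : regions) {
        // For the table created above: start/end keys ''->'1' and '1'->''.
        System.out.println(ri.getEncodedName() + " [" + Bytes.toString(ri.getStartKey())
            + "," + Bytes.toString(ri.getEndKey()) + ")");
      }
    }
  }
}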
2024-11-28T00:43:27,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,653 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:27,654 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:27,659 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:27,664 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:27,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754607667 (current time:1732754607667). 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1030fafb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:27,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:27,668 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:27,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:27,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:27,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32622946, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:27,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:27,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,670 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:27,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76a34c0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:27,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:27,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:27,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:27,673 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:43:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,673 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:27,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb093a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:27,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:27,675 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1edc0c60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,676 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43366, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:27,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec95257, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:27,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:27,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:27,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:27,678 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:27,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:27,681 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:43:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:27,681 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:43:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:43:27,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:43:27,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-28T00:43:27,684 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:43:27,684 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:27,686 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:27,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:43:27,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:43:27,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:43:27,692 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:27,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858}] 2024-11-28T00:43:27,693 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,693 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:43:27,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-28T00:43:27,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-11-28T00:43:27,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:27,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 422ff34824aa47c48e470ec07e329858: 2024-11-28T00:43:27,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for ac3c7155f593d6e22f31043f1aa2cc7b: 2024-11-28T00:43:27,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:43:27,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:27,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:43:27,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:43:27,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:43:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:43:27,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:27,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-28T00:43:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:43:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-28T00:43:27,858 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:43:27,858 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:43:27,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 
2024-11-28T00:43:27,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-28T00:43:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-11-28T00:43:27,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b in 167 msec 2024-11-28T00:43:27,860 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,860 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:27,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-11-28T00:43:27,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:27,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 in 169 msec 2024-11-28T00:43:27,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:43:27,864 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:43:27,864 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:43:27,865 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:27,865 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:43:27,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:43:27,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:43:27,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:43:27,871 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:27,871 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:27,872 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:27,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:43:27,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:43:27,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:43:27,881 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:27,885 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:27,885 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:27,886 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:27,886 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-28T00:43:27,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 205 msec 2024-11-28T00:43:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:43:27,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:43:28,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:28,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:28,007 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:28,009 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,009 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 
2024-11-28T00:43:28,010 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:28,011 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:28,016 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:28,022 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:43:28,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:43:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754608025 (current time:1732754608025). 2024-11-28T00:43:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:43:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-28T00:43:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@682fd8a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:28,027 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@479a3c24, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:28,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,028 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43390, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:28,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@178eda0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:28,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:28,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:28,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38146, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:28,032 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:43:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,032 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fe7e53b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:28,034 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:28,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:28,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:28,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336cdcdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:28,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:28,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,036 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43402, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:28,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1998be82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:28,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:28,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:28,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38152, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:28,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:28,042 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:43:28,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:28,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:28,042 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:28,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:43:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:43:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:43:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-28T00:43:28,045 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:43:28,046 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:28,048 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:28,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:43:28,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:43:28,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:43:28,054 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:28,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858}] 2024-11-28T00:43:28,055 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:28,056 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:28,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:43:28,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-11-28T00:43:28,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-28T00:43:28,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:28,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:28,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing ac3c7155f593d6e22f31043f1aa2cc7b 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:43:28,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 422ff34824aa47c48e470ec07e329858 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:43:28,225 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 is 71, key is 1137286bcdba08f3b896f97ecd4f0c31/cf:q/1732754608005/Put/seqid=0 2024-11-28T00:43:28,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b is 71, key is 004826313c4e0d01b5592d884e784381/cf:q/1732754608003/Put/seqid=0 2024-11-28T00:43:28,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742185_1361 (size=8241) 2024-11-28T00:43:28,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742185_1361 (size=8241) 2024-11-28T00:43:28,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742185_1361 (size=8241) 2024-11-28T00:43:28,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742186_1362 (size=5032) 2024-11-28T00:43:28,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742186_1362 (size=5032) 2024-11-28T00:43:28,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742186_1362 (size=5032) 2024-11-28T00:43:28,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,237 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:28,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/.tmp/cf/3b910f14b057459ba36e40f61cba6d57, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=422ff34824aa47c48e470ec07e329858] 2024-11-28T00:43:28,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/.tmp/cf/3b910f14b057459ba36e40f61cba6d57 is 224, key is 192bf1c67f3c4a7542e37c94ac446f8b5/cf:q/1732754608005/Put/seqid=0 2024-11-28T00:43:28,239 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:28,240 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/.tmp/cf/0db85993e54b4c19aaba18846d88ad78, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=ac3c7155f593d6e22f31043f1aa2cc7b] 2024-11-28T00:43:28,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/.tmp/cf/0db85993e54b4c19aaba18846d88ad78 is 224, key is 07b2e729522671a6282b1df13792a51c3/cf:q/1732754608003/Put/seqid=0 2024-11-28T00:43:28,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742187_1363 (size=15937) 2024-11-28T00:43:28,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742187_1363 (size=15937) 2024-11-28T00:43:28,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742187_1363 (size=15937) 2024-11-28T00:43:28,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/.tmp/cf/3b910f14b057459ba36e40f61cba6d57 2024-11-28T00:43:28,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742188_1364 (size=5754) 2024-11-28T00:43:28,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742188_1364 (size=5754) 2024-11-28T00:43:28,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742188_1364 (size=5754) 2024-11-28T00:43:28,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/.tmp/cf/3b910f14b057459ba36e40f61cba6d57 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57 2024-11-28T00:43:28,253 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/.tmp/cf/0db85993e54b4c19aaba18846d88ad78 2024-11-28T00:43:28,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57, entries=48, sequenceid=6, filesize=15.6 K 2024-11-28T00:43:28,259 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 422ff34824aa47c48e470ec07e329858 in 52ms, sequenceid=6, compaction requested=false 2024-11-28T00:43:28,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/.tmp/cf/0db85993e54b4c19aaba18846d88ad78 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78 2024-11-28T00:43:28,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 422ff34824aa47c48e470ec07e329858: 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57] hfiles 2024-11-28T00:43:28,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,264 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:43:28,265 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for ac3c7155f593d6e22f31043f1aa2cc7b in 58ms, sequenceid=6, compaction requested=false 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for ac3c7155f593d6e22f31043f1aa2cc7b: 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78] hfiles 2024-11-28T00:43:28,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:43:28,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:43:28,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:43:28,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 
2024-11-28T00:43:28,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-11-28T00:43:28,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-11-28T00:43:28,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:28,275 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:28,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 422ff34824aa47c48e470ec07e329858 in 222 msec 2024-11-28T00:43:28,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:43:28,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:43:28,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:43:28,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 
2024-11-28T00:43:28,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-28T00:43:28,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-28T00:43:28,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:28,286 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:28,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-11-28T00:43:28,288 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:28,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b in 233 msec 2024-11-28T00:43:28,290 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:43:28,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:43:28,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:43:28,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b] hfiles 2024-11-28T00:43:28,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:28,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:43:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:43:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:43:28,299 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:28,299 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,299 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:43:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38327 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:43:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:43:28,314 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:28,319 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:28,319 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,320 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:28,320 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-28T00:43:28,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 277 msec 2024-11-28T00:43:28,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:43:28,368 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:43:28,371 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:43:28,371 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52156, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:43:28,371 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-11-28T00:43:28,383 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:43:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:28,385 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:43:28,385 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-11-28T00:43:28,386 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:43:28,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:43:28,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:43:28,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:43:28,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:43:28,400 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0318930bb763783e93512e4d52b5ca0d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:28,401 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 0327ad36443e915bc805346e38167e30, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:43:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:43:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:43:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:43:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:43:28,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 0327ad36443e915bc805346e38167e30, disabling compactions & flushes 2024-11-28T00:43:28,409 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 0318930bb763783e93512e4d52b5ca0d, disabling compactions & flushes 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 
2024-11-28T00:43:28,409 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. after waiting 0 ms 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. after waiting 0 ms 2024-11-28T00:43:28,409 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,409 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 
2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0327ad36443e915bc805346e38167e30: Waiting for close lock at 1732754608409Disabling compacts and flushes for region at 1732754608409Disabling writes for close at 1732754608409Writing region close event to WAL at 1732754608409Closed at 1732754608409 2024-11-28T00:43:28,409 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0318930bb763783e93512e4d52b5ca0d: Waiting for close lock at 1732754608409Disabling compacts and flushes for region at 1732754608409Disabling writes for close at 1732754608409Writing region close event to WAL at 1732754608409Closed at 1732754608409 2024-11-28T00:43:28,410 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:43:28,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732754608410"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754608410"}]},"ts":"1732754608410"} 2024-11-28T00:43:28,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732754608410"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754608410"}]},"ts":"1732754608410"} 2024-11-28T00:43:28,413 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-28T00:43:28,414 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:43:28,414 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754608414"}]},"ts":"1732754608414"} 2024-11-28T00:43:28,416 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-28T00:43:28,417 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:43:28,418 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:43:28,418 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:43:28,418 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:43:28,418 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:43:28,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, ASSIGN}] 2024-11-28T00:43:28,420 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, ASSIGN 2024-11-28T00:43:28,420 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, ASSIGN 2024-11-28T00:43:28,421 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, 
retain=false 2024-11-28T00:43:28,421 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:43:28,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:28,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:43:28,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-28T00:43:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:43:28,572 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:43:28,572 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=0327ad36443e915bc805346e38167e30, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:28,572 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=0318930bb763783e93512e4d52b5ca0d, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:28,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, ASSIGN because future has completed 2024-11-28T00:43:28,575 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:43:28,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, ASSIGN because future has completed 2024-11-28T00:43:28,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:28,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:43:28,731 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:28,731 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,731 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 0327ad36443e915bc805346e38167e30, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.', STARTKEY => '2', ENDKEY => ''} 2024-11-28T00:43:28,731 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 0318930bb763783e93512e4d52b5ca0d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.', STARTKEY => '', ENDKEY => '2'} 2024-11-28T00:43:28,731 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. service=AccessControlService 2024-11-28T00:43:28,731 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. service=AccessControlService 2024-11-28T00:43:28,732 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:43:28,732 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,732 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,733 INFO [StoreOpener-0318930bb763783e93512e4d52b5ca0d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,733 INFO [StoreOpener-0327ad36443e915bc805346e38167e30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,734 INFO [StoreOpener-0327ad36443e915bc805346e38167e30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0327ad36443e915bc805346e38167e30 columnFamilyName cf 2024-11-28T00:43:28,734 INFO [StoreOpener-0318930bb763783e93512e4d52b5ca0d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0318930bb763783e93512e4d52b5ca0d columnFamilyName cf 2024-11-28T00:43:28,734 DEBUG [StoreOpener-0327ad36443e915bc805346e38167e30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,734 DEBUG [StoreOpener-0318930bb763783e93512e4d52b5ca0d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:28,735 INFO [StoreOpener-0318930bb763783e93512e4d52b5ca0d-1 {}] regionserver.HStore(327): Store=0318930bb763783e93512e4d52b5ca0d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:28,735 INFO [StoreOpener-0327ad36443e915bc805346e38167e30-1 {}] regionserver.HStore(327): Store=0327ad36443e915bc805346e38167e30/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:28,735 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,735 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,735 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,735 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,736 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,737 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,737 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,739 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:28,739 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:28,739 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 0327ad36443e915bc805346e38167e30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67546235, jitterRate=0.006517335772514343}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:28,739 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:28,739 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 0318930bb763783e93512e4d52b5ca0d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70754342, jitterRate=0.054321855306625366}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:28,739 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:28,740 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 0327ad36443e915bc805346e38167e30: Running coprocessor pre-open hook at 1732754608732Writing region info on filesystem at 1732754608732Initializing all the Stores at 1732754608733 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754608733Cleaning up temporary data from old regions at 1732754608736 (+3 ms)Running coprocessor post-open hooks at 1732754608739 (+3 ms)Region opened successfully at 1732754608740 (+1 ms) 2024-11-28T00:43:28,740 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 0318930bb763783e93512e4d52b5ca0d: Running coprocessor pre-open hook at 1732754608732Writing region info on filesystem at 1732754608732Initializing all the Stores at 1732754608733 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754608733Cleaning up temporary data from old regions at 1732754608736 (+3 ms)Running coprocessor post-open hooks at 1732754608739 (+3 ms)Region opened successfully at 1732754608740 (+1 ms) 2024-11-28T00:43:28,740 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30., pid=146, masterSystemTime=1732754608727 2024-11-28T00:43:28,740 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d., pid=147, masterSystemTime=1732754608728 2024-11-28T00:43:28,742 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,742 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:28,742 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=0318930bb763783e93512e4d52b5ca0d, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:28,743 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 
2024-11-28T00:43:28,743 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:28,743 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=0327ad36443e915bc805346e38167e30, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:28,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:28,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:43:28,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-11-28T00:43:28,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084 in 169 msec 2024-11-28T00:43:28,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-11-28T00:43:28,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938 in 171 msec 2024-11-28T00:43:28,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, ASSIGN in 327 msec 2024-11-28T00:43:28,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-28T00:43:28,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, ASSIGN in 328 msec 2024-11-28T00:43:28,749 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:43:28,749 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754608749"}]},"ts":"1732754608749"} 2024-11-28T00:43:28,751 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-28T00:43:28,752 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:43:28,752 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-28T00:43:28,756 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-28T00:43:28,764 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:28,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:28,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:28,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:28,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 392 msec 2024-11-28T00:43:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:43:29,018 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:43:29,021 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:29,025 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30., hostname=97f25515df69,38585,1732754368938, seqNum=2] 2024-11-28T00:43:29,027 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-28T00:43:29,040 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.3 merge regions [0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30] 2024-11-28T00:43:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30], force=true 2024-11-28T00:43:29,047 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30], force=true 2024-11-28T00:43:29,047 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30], force=true 2024-11-28T00:43:29,047 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30], force=true 2024-11-28T00:43:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:43:29,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, UNASSIGN}] 2024-11-28T00:43:29,054 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, UNASSIGN 2024-11-28T00:43:29,054 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, UNASSIGN 2024-11-28T00:43:29,055 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=0318930bb763783e93512e4d52b5ca0d, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:29,055 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=0327ad36443e915bc805346e38167e30, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:29,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, UNASSIGN because future has completed 2024-11-28T00:43:29,057 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:29,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:29,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, UNASSIGN because future has completed 2024-11-28T00:43:29,058 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:29,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:43:29,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:43:29,209 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:29,209 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 0318930bb763783e93512e4d52b5ca0d, disabling compactions & flushes 2024-11-28T00:43:29,210 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. after waiting 0 ms 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:29,210 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 0318930bb763783e93512e4d52b5ca0d 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-28T00:43:29,210 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 0327ad36443e915bc805346e38167e30, disabling compactions & flushes 2024-11-28T00:43:29,210 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:29,210 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 
2024-11-28T00:43:29,211 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. after waiting 0 ms 2024-11-28T00:43:29,211 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:29,211 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 0327ad36443e915bc805346e38167e30 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-28T00:43:29,224 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/.tmp/cf/53dc1067991b47bea2a75c3aaa017e84 is 28, key is 1/cf:/1732754609022/Put/seqid=0 2024-11-28T00:43:29,224 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/.tmp/cf/973acbbeacb84f36b3c612da7e181f49 is 28, key is 2/cf:/1732754609026/Put/seqid=0 2024-11-28T00:43:29,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:43:29,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:43:29,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:43:29,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:43:29,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:43:29,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:43:29,230 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/.tmp/cf/973acbbeacb84f36b3c612da7e181f49 2024-11-28T00:43:29,230 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/.tmp/cf/53dc1067991b47bea2a75c3aaa017e84 2024-11-28T00:43:29,234 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/.tmp/cf/53dc1067991b47bea2a75c3aaa017e84 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84 2024-11-28T00:43:29,234 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/.tmp/cf/973acbbeacb84f36b3c612da7e181f49 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49 2024-11-28T00:43:29,239 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84, entries=1, sequenceid=5, filesize=4.8 K 2024-11-28T00:43:29,239 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49, entries=1, sequenceid=5, filesize=4.8 K 2024-11-28T00:43:29,240 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0318930bb763783e93512e4d52b5ca0d in 30ms, sequenceid=5, compaction requested=false 2024-11-28T00:43:29,240 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0327ad36443e915bc805346e38167e30 in 29ms, sequenceid=5, compaction requested=false 2024-11-28T00:43:29,240 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-28T00:43:29,240 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-28T00:43:29,244 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): 
Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:43:29,244 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:43:29,245 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:29,245 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:29,245 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 2024-11-28T00:43:29,245 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. 2024-11-28T00:43:29,245 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 0327ad36443e915bc805346e38167e30: Waiting for close lock at 1732754609210Running coprocessor pre-close hooks at 1732754609210Disabling compacts and flushes for region at 1732754609210Disabling writes for close at 1732754609211 (+1 ms)Obtaining lock to block concurrent updates at 1732754609211Preparing flush snapshotting stores in 0327ad36443e915bc805346e38167e30 at 1732754609211Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732754609211Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30. 
at 1732754609211Flushing 0327ad36443e915bc805346e38167e30/cf: creating writer at 1732754609211Flushing 0327ad36443e915bc805346e38167e30/cf: appending metadata at 1732754609224 (+13 ms)Flushing 0327ad36443e915bc805346e38167e30/cf: closing flushed file at 1732754609224Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@859d8c4: reopening flushed file at 1732754609234 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0327ad36443e915bc805346e38167e30 in 29ms, sequenceid=5, compaction requested=false at 1732754609240 (+6 ms)Writing region close event to WAL at 1732754609242 (+2 ms)Running coprocessor post-close hooks at 1732754609244 (+2 ms)Closed at 1732754609245 (+1 ms) 2024-11-28T00:43:29,245 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 0318930bb763783e93512e4d52b5ca0d: Waiting for close lock at 1732754609209Running coprocessor pre-close hooks at 1732754609209Disabling compacts and flushes for region at 1732754609209Disabling writes for close at 1732754609210 (+1 ms)Obtaining lock to block concurrent updates at 1732754609210Preparing flush snapshotting stores in 0318930bb763783e93512e4d52b5ca0d at 1732754609210Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732754609210Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d. at 1732754609211 (+1 ms)Flushing 0318930bb763783e93512e4d52b5ca0d/cf: creating writer at 1732754609211Flushing 0318930bb763783e93512e4d52b5ca0d/cf: appending metadata at 1732754609224 (+13 ms)Flushing 0318930bb763783e93512e4d52b5ca0d/cf: closing flushed file at 1732754609224Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28fd6366: reopening flushed file at 1732754609234 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0318930bb763783e93512e4d52b5ca0d in 30ms, sequenceid=5, compaction requested=false at 1732754609240 (+6 ms)Writing region close event to WAL at 1732754609241 (+1 ms)Running coprocessor post-close hooks at 1732754609245 (+4 ms)Closed at 1732754609245 2024-11-28T00:43:29,246 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 0327ad36443e915bc805346e38167e30 2024-11-28T00:43:29,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=0327ad36443e915bc805346e38167e30, regionState=CLOSED 2024-11-28T00:43:29,247 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:29,247 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=0318930bb763783e93512e4d52b5ca0d, regionState=CLOSED 2024-11-28T00:43:29,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:43:29,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:29,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-11-28T00:43:29,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 0327ad36443e915bc805346e38167e30, server=97f25515df69,38585,1732754368938 in 191 msec 2024-11-28T00:43:29,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-11-28T00:43:29,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0327ad36443e915bc805346e38167e30, UNASSIGN in 198 msec 2024-11-28T00:43:29,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 0318930bb763783e93512e4d52b5ca0d, server=97f25515df69,43795,1732754369084 in 193 msec 2024-11-28T00:43:29,253 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-11-28T00:43:29,253 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0318930bb763783e93512e4d52b5ca0d, UNASSIGN in 198 msec 2024-11-28T00:43:29,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:43:29,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:43:29,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:43:29,263 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:29,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:43:29,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:43:29,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:43:29,270 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:29,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:43:29,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:43:29,277 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:43:29,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:43:29,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:43:29,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:43:29,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:43:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:43:29,684 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:29,694 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-28T00:43:29,696 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608382.0318930bb763783e93512e4d52b5ca0d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:29,696 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732754608382.0327ad36443e915bc805346e38167e30.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:29,696 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:29,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, ASSIGN}] 2024-11-28T00:43:29,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, ASSIGN 2024-11-28T00:43:29,701 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, ASSIGN; state=MERGED, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:43:29,851 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:43:29,852 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=d68e3d8f5d5916d06b2877ee6efc9c48, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:29,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, ASSIGN because future has completed 2024-11-28T00:43:29,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:30,009 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:30,009 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => d68e3d8f5d5916d06b2877ee6efc9c48, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:43:30,009 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. service=AccessControlService 2024-11-28T00:43:30,010 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:43:30,010 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,010 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:30,010 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,010 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,011 INFO [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,012 INFO [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d68e3d8f5d5916d06b2877ee6efc9c48 columnFamilyName cf 2024-11-28T00:43:30,012 DEBUG [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:30,021 DEBUG [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/53dc1067991b47bea2a75c3aaa017e84.0318930bb763783e93512e4d52b5ca0d->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84-top 2024-11-28T00:43:30,026 DEBUG [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/973acbbeacb84f36b3c612da7e181f49.0327ad36443e915bc805346e38167e30->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49-top 2024-11-28T00:43:30,026 INFO [StoreOpener-d68e3d8f5d5916d06b2877ee6efc9c48-1 {}] regionserver.HStore(327): Store=d68e3d8f5d5916d06b2877ee6efc9c48/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:30,026 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,027 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,028 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,028 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,028 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,030 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened d68e3d8f5d5916d06b2877ee6efc9c48; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66882235, jitterRate=-0.0033770352602005005}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:30,030 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,031 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for d68e3d8f5d5916d06b2877ee6efc9c48: Running coprocessor pre-open hook at 1732754610010Writing region info on filesystem at 1732754610010Initializing all the Stores at 1732754610011 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754610011Cleaning up temporary data from old regions at 1732754610028 (+17 ms)Running coprocessor post-open hooks at 1732754610030 (+2 ms)Region opened successfully at 1732754610031 (+1 ms) 2024-11-28T00:43:30,032 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48., pid=154, masterSystemTime=1732754610006 2024-11-28T00:43:30,032 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.,because compaction is disabled. 2024-11-28T00:43:30,033 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:30,033 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:30,034 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=d68e3d8f5d5916d06b2877ee6efc9c48, regionState=OPEN, openSeqNum=9, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:30,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:30,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-28T00:43:30,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084 in 183 msec 2024-11-28T00:43:30,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-11-28T00:43:30,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, ASSIGN in 338 msec 2024-11-28T00:43:30,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0318930bb763783e93512e4d52b5ca0d, 0327ad36443e915bc805346e38167e30], force=true in 997 msec 2024-11-28T00:43:30,045 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0006_000001 (auth:SIMPLE) from 127.0.0.1:49158 2024-11-28T00:43:30,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000001/launch_container.sh] 2024-11-28T00:43:30,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000001/container_tokens] 2024-11-28T00:43:30,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0006/container_1732754375741_0006_01_000001/sysfs] 2024-11-28T00:43:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:43:30,189 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:43:30,190 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-28T00:43:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754610190 (current time:1732754610190). 
2024-11-28T00:43:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:43:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-28T00:43:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:30,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@587ce59f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:30,195 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:30,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:30,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:30,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@306b5062, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:30,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:30,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:30,199 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43410, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:30,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047cc8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:30,201 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:30,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:30,202 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:30,204 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:30,204 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:43:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:30,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f903d2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:30,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:30,206 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cdfae43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:30,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:30,207 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43436, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:30,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e8f6636, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:30,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:30,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:30,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:30,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:30,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:30,212 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:30,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:30,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:30,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-28T00:43:30,212 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:30,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-28T00:43:30,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:43:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-28T00:43:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-28T00:43:30,215 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:30,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:43:30,216 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:30,218 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:43:30,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:43:30,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:43:30,225 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:30,225 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48}] 2024-11-28T00:43:30,226 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:43:30,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-11-28T00:43:30,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for d68e3d8f5d5916d06b2877ee6efc9c48: 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/53dc1067991b47bea2a75c3aaa017e84.0318930bb763783e93512e4d52b5ca0d->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84-top, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/973acbbeacb84f36b3c612da7e181f49.0327ad36443e915bc805346e38167e30->hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49-top] hfiles 2024-11-28T00:43:30,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/53dc1067991b47bea2a75c3aaa017e84.0318930bb763783e93512e4d52b5ca0d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/973acbbeacb84f36b3c612da7e181f49.0327ad36443e915bc805346e38167e30 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:43:30,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:43:30,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:43:30,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 
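At this point SnapshotRegionProcedure (pid=156) has written the region manifest, including references to the two hfiles inherited from the merged parent regions. Once pid=155 finishes, the snapshot can be confirmed from the client side by listing snapshots; a minimal sketch under the same assumptions as the previous one:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (SnapshotDescription sd : admin.listSnapshots()) {
            // Each entry carries the snapshot name and the table it was taken from.
            System.out.println(sd.getName() + " -> " + sd.getTableName());
          }
        }
      }
    }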
2024-11-28T00:43:30,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-28T00:43:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-11-28T00:43:30,389 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,389 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:30,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-11-28T00:43:30,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48 in 165 msec 2024-11-28T00:43:30,391 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:30,392 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:43:30,392 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:30,392 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,393 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:43:30,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:43:30,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:43:30,406 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:30,411 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:30,412 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,413 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:30,413 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-28T00:43:30,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 200 msec 2024-11-28T00:43:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:43:30,527 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:43:30,528 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528 2024-11-28T00:43:30,528 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:30,558 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:30,558 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,559 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:43:30,563 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742206_1382 (size=216) 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742206_1382 (size=216) 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742206_1382 (size=216) 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742205_1381 (size=670) 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742205_1381 (size=670) 2024-11-28T00:43:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742205_1381 (size=670) 2024-11-28T00:43:30,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:30,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:30,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-10500008046931513087.jar 2024-11-28T00:43:31,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-17340340398221448699.jar 2024-11-28T00:43:31,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:43:31,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:43:31,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:43:31,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:31,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:31,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:31,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:43:31,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:31,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:43:31,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742207_1383 (size=24020) 2024-11-28T00:43:31,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742207_1383 (size=24020) 2024-11-28T00:43:31,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742207_1383 (size=24020) 2024-11-28T00:43:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742208_1384 (size=77755) 2024-11-28T00:43:31,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742208_1384 (size=77755) 2024-11-28T00:43:31,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742208_1384 (size=77755) 2024-11-28T00:43:31,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742209_1385 (size=131360) 2024-11-28T00:43:31,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742209_1385 (size=131360) 2024-11-28T00:43:31,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742209_1385 (size=131360) 2024-11-28T00:43:31,642 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:43:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742210_1386 (size=111793) 2024-11-28T00:43:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742210_1386 (size=111793) 2024-11-28T00:43:31,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742210_1386 (size=111793) 2024-11-28T00:43:31,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742211_1387 (size=1832290) 2024-11-28T00:43:31,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742211_1387 (size=1832290) 2024-11-28T00:43:31,674 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742211_1387 (size=1832290) 2024-11-28T00:43:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742212_1388 (size=8360005) 2024-11-28T00:43:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742212_1388 (size=8360005) 2024-11-28T00:43:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742212_1388 (size=8360005) 2024-11-28T00:43:31,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742213_1389 (size=440956) 2024-11-28T00:43:31,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742213_1389 (size=440956) 2024-11-28T00:43:31,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742213_1389 (size=440956) 2024-11-28T00:43:31,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742214_1390 (size=503880) 2024-11-28T00:43:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742214_1390 (size=503880) 2024-11-28T00:43:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742214_1390 (size=503880) 2024-11-28T00:43:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742215_1391 (size=322274) 2024-11-28T00:43:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742215_1391 (size=322274) 2024-11-28T00:43:31,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742215_1391 (size=322274) 2024-11-28T00:43:31,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742216_1392 (size=20406) 2024-11-28T00:43:31,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742216_1392 (size=20406) 2024-11-28T00:43:31,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742216_1392 (size=20406) 2024-11-28T00:43:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742217_1393 (size=45609) 2024-11-28T00:43:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742217_1393 (size=45609) 2024-11-28T00:43:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742217_1393 (size=45609) 2024-11-28T00:43:31,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742218_1394 (size=136454) 
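The long run of "For class X, using jar Y" DEBUG records from TableMapReduceUtil, together with the block reports around them, comes from the export job resolving its dependency jars and staging them in HDFS before submission. As a rough illustration (not the test's own code), this is the usual shape of that step for any HBase MapReduce job; the job name is made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-demo"); // hypothetical job name
        // Resolves the jar backing each required class and adds it to the job's tmpjars,
        // which is what produces the "For class ..., using jar ..." records above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }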
2024-11-28T00:43:31,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742218_1394 (size=136454) 2024-11-28T00:43:31,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742218_1394 (size=136454) 2024-11-28T00:43:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742219_1395 (size=1597136) 2024-11-28T00:43:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742219_1395 (size=1597136) 2024-11-28T00:43:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742219_1395 (size=1597136) 2024-11-28T00:43:31,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742220_1396 (size=30873) 2024-11-28T00:43:31,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742220_1396 (size=30873) 2024-11-28T00:43:31,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742220_1396 (size=30873) 2024-11-28T00:43:31,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742221_1397 (size=29229) 2024-11-28T00:43:31,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742221_1397 (size=29229) 2024-11-28T00:43:31,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742221_1397 (size=29229) 2024-11-28T00:43:31,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742222_1398 (size=6424743) 2024-11-28T00:43:31,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742222_1398 (size=6424743) 2024-11-28T00:43:31,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742222_1398 (size=6424743) 2024-11-28T00:43:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742223_1399 (size=903861) 2024-11-28T00:43:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742223_1399 (size=903861) 2024-11-28T00:43:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742223_1399 (size=903861) 2024-11-28T00:43:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742224_1400 (size=5175431) 2024-11-28T00:43:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742224_1400 (size=5175431) 2024-11-28T00:43:31,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742224_1400 
(size=5175431) 2024-11-28T00:43:31,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742225_1401 (size=232881) 2024-11-28T00:43:31,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742225_1401 (size=232881) 2024-11-28T00:43:31,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742225_1401 (size=232881) 2024-11-28T00:43:31,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742226_1402 (size=1323991) 2024-11-28T00:43:31,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742226_1402 (size=1323991) 2024-11-28T00:43:31,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742226_1402 (size=1323991) 2024-11-28T00:43:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742227_1403 (size=4695811) 2024-11-28T00:43:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742227_1403 (size=4695811) 2024-11-28T00:43:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742227_1403 (size=4695811) 2024-11-28T00:43:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742228_1404 (size=1877034) 2024-11-28T00:43:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742228_1404 (size=1877034) 2024-11-28T00:43:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742228_1404 (size=1877034) 2024-11-28T00:43:31,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742229_1405 (size=217555) 2024-11-28T00:43:31,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742229_1405 (size=217555) 2024-11-28T00:43:31,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742229_1405 (size=217555) 2024-11-28T00:43:31,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742230_1406 (size=4188619) 2024-11-28T00:43:31,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742230_1406 (size=4188619) 2024-11-28T00:43:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742230_1406 (size=4188619) 2024-11-28T00:43:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742231_1407 (size=127628) 2024-11-28T00:43:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to 
blk_1073742231_1407 (size=127628) 2024-11-28T00:43:31,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742231_1407 (size=127628) 2024-11-28T00:43:31,908 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:43:31,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-28T00:43:31,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-11-28T00:43:31,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-11-28T00:43:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:43:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:43:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:43:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:43:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:43:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:43:32,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742234_1410 (size=304138) 2024-11-28T00:43:32,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742234_1410 (size=304138) 2024-11-28T00:43:32,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742234_1410 (size=304138) 2024-11-28T00:43:32,354 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:43:32,354 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:43:33,037 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:49452 2024-11-28T00:43:36,875 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ac3c7155f593d6e22f31043f1aa2cc7b changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:43:36,875 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 422ff34824aa47c48e470ec07e329858 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:43:38,195 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:44726 2024-11-28T00:43:38,441 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:38,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-28T00:43:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742235_1411 (size=349836) 2024-11-28T00:43:38,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742235_1411 (size=349836) 2024-11-28T00:43:38,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742235_1411 (size=349836) 2024-11-28T00:43:40,498 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:49460 2024-11-28T00:43:40,501 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:59266 2024-11-28T00:43:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:43:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:43:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:43:44,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000002/launch_container.sh] 2024-11-28T00:43:44,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000002/container_tokens] 2024-11-28T00:43:44,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000002/sysfs] 2024-11-28T00:43:45,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:43:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:43:45,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:43:45,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:43:45,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:43:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:43:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742239_1415 (size=482) 2024-11-28T00:43:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742239_1415 (size=482) 2024-11-28T00:43:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742239_1415 (size=482) 2024-11-28T00:43:45,903 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000003/launch_container.sh] 2024-11-28T00:43:45,903 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000003/container_tokens] 2024-11-28T00:43:45,903 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000003/sysfs] 2024-11-28T00:43:45,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:43:45,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:43:45,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:43:45,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742241_1417 (size=349836) 2024-11-28T00:43:45,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742241_1417 (size=349836) 2024-11-28T00:43:45,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742241_1417 (size=349836) 2024-11-28T00:43:45,990 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:40078 2024-11-28T00:43:47,676 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:43:47,678 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
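The export that is finalizing here is driven by the ExportSnapshot tool; its earlier records show the input/output roots and the copy of the snapshot manifest into the destination's .hbase-snapshot/.tmp directory. A minimal sketch of invoking the same tool programmatically, with only the documented -snapshot/-copy-to options and a placeholder destination URI:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Mirrors the documented CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 -copy-to hdfs://dest:8020/hbase
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://dest:8020/hbase" // placeholder destination root
        });
        System.exit(rc);
      }
    }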
2024-11-28T00:43:47,683 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:47,684 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:43:47,684 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:43:47,684 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:47,684 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-28T00:43:47,684 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-28T00:43:47,684 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:47,685 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-28T00:43:47,685 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754610528/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-28T00:43:47,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:47,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:47,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-28T00:43:47,694 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754627694"}]},"ts":"1732754627694"} 2024-11-28T00:43:47,696 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-28T00:43:47,696 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-28T00:43:47,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-28T00:43:47,697 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, UNASSIGN}] 2024-11-28T00:43:47,698 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, UNASSIGN 2024-11-28T00:43:47,699 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=d68e3d8f5d5916d06b2877ee6efc9c48, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:47,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, UNASSIGN because future has completed 2024-11-28T00:43:47,700 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:47,700 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:47,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-28T00:43:47,852 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:47,852 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:43:47,852 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing d68e3d8f5d5916d06b2877ee6efc9c48, disabling compactions & flushes 2024-11-28T00:43:47,852 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:47,853 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 
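The disable in progress here (pid=157 with its CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children) and the delete that follows as pid=161 correspond to the standard client-side cleanup; a minimal sketch, again assuming a live connection, with the table name taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(tn); // drives the DisableTableProcedure seen around here
          admin.deleteTable(tn);  // drives the DeleteTableProcedure (pid=161) further down
        }
      }
    }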
2024-11-28T00:43:47,853 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. after waiting 0 ms 2024-11-28T00:43:47,853 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:47,858 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-28T00:43:47,859 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:47,859 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48. 2024-11-28T00:43:47,859 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for d68e3d8f5d5916d06b2877ee6efc9c48: Waiting for close lock at 1732754627852Running coprocessor pre-close hooks at 1732754627852Disabling compacts and flushes for region at 1732754627852Disabling writes for close at 1732754627853 (+1 ms)Writing region close event to WAL at 1732754627853Running coprocessor post-close hooks at 1732754627859 (+6 ms)Closed at 1732754627859 2024-11-28T00:43:47,860 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:47,861 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=d68e3d8f5d5916d06b2877ee6efc9c48, regionState=CLOSED 2024-11-28T00:43:47,862 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:47,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-28T00:43:47,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure d68e3d8f5d5916d06b2877ee6efc9c48, server=97f25515df69,43795,1732754369084 in 162 msec 2024-11-28T00:43:47,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-11-28T00:43:47,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d68e3d8f5d5916d06b2877ee6efc9c48, UNASSIGN in 168 msec 2024-11-28T00:43:47,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=158, resume processing ppid=157 2024-11-28T00:43:47,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 171 msec 2024-11-28T00:43:47,870 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754627870"}]},"ts":"1732754627870"} 2024-11-28T00:43:47,871 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-28T00:43:47,872 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-28T00:43:47,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 182 msec 2024-11-28T00:43:48,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-28T00:43:48,008 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:43:48,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,010 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,011 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,013 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,014 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:48,014 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:48,015 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/recovered.edits] 2024-11-28T00:43:48,015 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/recovered.edits] 2024-11-28T00:43:48,015 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30 2024-11-28T00:43:48,016 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/recovered.edits] 2024-11-28T00:43:48,018 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/cf/53dc1067991b47bea2a75c3aaa017e84 2024-11-28T00:43:48,018 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/53dc1067991b47bea2a75c3aaa017e84.0318930bb763783e93512e4d52b5ca0d to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/53dc1067991b47bea2a75c3aaa017e84.0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:48,019 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/cf/973acbbeacb84f36b3c612da7e181f49 2024-11-28T00:43:48,019 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/973acbbeacb84f36b3c612da7e181f49.0327ad36443e915bc805346e38167e30 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/cf/973acbbeacb84f36b3c612da7e181f49.0327ad36443e915bc805346e38167e30 2024-11-28T00:43:48,020 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/recovered.edits/8.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d/recovered.edits/8.seqid 2024-11-28T00:43:48,021 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0318930bb763783e93512e4d52b5ca0d 2024-11-28T00:43:48,028 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/recovered.edits/12.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48/recovered.edits/12.seqid 2024-11-28T00:43:48,029 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d68e3d8f5d5916d06b2877ee6efc9c48 2024-11-28T00:43:48,036 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/recovered.edits/8.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30/recovered.edits/8.seqid 2024-11-28T00:43:48,036 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0327ad36443e915bc805346e38167e30 2024-11-28T00:43:48,036 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-28T00:43:48,045 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,049 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-28T00:43:48,150 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,150 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:43:48,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:43:48,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:43:48,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:43:48,152 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-28T00:43:48,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-28T00:43:48,153 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754628153"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:48,154 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-28T00:43:48,154 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d68e3d8f5d5916d06b2877ee6efc9c48, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T00:43:48,155 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
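
The entries above record table testtb-testExportFileSystemStateWithMergeRegion-1 being disabled (pid=157-160) and then deleted (pid=161), and the entries that follow repeat the same DISABLE/DELETE sequence for testtb-testExportFileSystemStateWithMergeRegion before the test snapshots are removed near the end of this section. For orientation only, the sketch below shows the kind of client-side Admin calls that drive these master procedures; it is not the test's actual code (the log shows the async RawAsyncHBaseAdmin path), it uses the synchronous Admin API for brevity, and the class/variable names are made up for the sketch. The table and snapshot names are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableCleanupSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the (test) cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table =
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      // Maps to DisableTableProcedure (pid=157) and DeleteTableProcedure (pid=161) above.
      admin.disableTable(table);
      admin.deleteTable(table);
      // Maps to the 'delete name: "..." type: DISABLED' entries later in this section;
      // the other snapshots (emptySnaptb0-..., snaptb0-...-1) are deleted the same way.
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
    }
  }
}

Each call returns once the corresponding master procedure reports SUCCESS, which is what the repeated "Checking to see if procedure is done pid=..." entries from MasterRpcServices reflect.
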
2024-11-28T00:43:48,155 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754628155"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:48,156 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-28T00:43:48,157 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 149 msec 2024-11-28T00:43:48,163 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,163 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-11-28T00:43:48,164 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:48,164 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:43:48,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:48,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:48,165 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-28T00:43:48,168 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754628167"}]},"ts":"1732754628167"} 2024-11-28T00:43:48,169 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-28T00:43:48,169 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-28T00:43:48,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-28T00:43:48,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, UNASSIGN}] 2024-11-28T00:43:48,172 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): 
Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, UNASSIGN 2024-11-28T00:43:48,172 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, UNASSIGN 2024-11-28T00:43:48,172 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=422ff34824aa47c48e470ec07e329858, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:48,172 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=ac3c7155f593d6e22f31043f1aa2cc7b, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:43:48,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, UNASSIGN because future has completed 2024-11-28T00:43:48,174 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:48,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:48,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, UNASSIGN because future has completed 2024-11-28T00:43:48,175 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:43:48,175 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:43:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-28T00:43:48,326 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:48,326 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:43:48,326 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 422ff34824aa47c48e470ec07e329858, disabling compactions & flushes 2024-11-28T00:43:48,326 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. after waiting 0 ms 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 2024-11-28T00:43:48,327 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing ac3c7155f593d6e22f31043f1aa2cc7b, disabling compactions & flushes 2024-11-28T00:43:48,327 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:48,327 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:48,328 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. after waiting 0 ms 2024-11-28T00:43:48,328 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:48,331 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:43:48,331 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:48,332 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858. 
2024-11-28T00:43:48,332 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 422ff34824aa47c48e470ec07e329858: Waiting for close lock at 1732754628326Running coprocessor pre-close hooks at 1732754628326Disabling compacts and flushes for region at 1732754628326Disabling writes for close at 1732754628327 (+1 ms)Writing region close event to WAL at 1732754628327Running coprocessor post-close hooks at 1732754628331 (+4 ms)Closed at 1732754628331 2024-11-28T00:43:48,332 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:43:48,332 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:43:48,332 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b. 2024-11-28T00:43:48,332 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for ac3c7155f593d6e22f31043f1aa2cc7b: Waiting for close lock at 1732754628327Running coprocessor pre-close hooks at 1732754628327Disabling compacts and flushes for region at 1732754628327Disabling writes for close at 1732754628328 (+1 ms)Writing region close event to WAL at 1732754628328Running coprocessor post-close hooks at 1732754628332 (+4 ms)Closed at 1732754628332 2024-11-28T00:43:48,333 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:48,334 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=422ff34824aa47c48e470ec07e329858, regionState=CLOSED 2024-11-28T00:43:48,334 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:48,335 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=ac3c7155f593d6e22f31043f1aa2cc7b, regionState=CLOSED 2024-11-28T00:43:48,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:48,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:43:48,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-28T00:43:48,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 422ff34824aa47c48e470ec07e329858, server=97f25515df69,43795,1732754369084 in 164 msec 2024-11-28T00:43:48,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-11-28T00:43:48,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure ac3c7155f593d6e22f31043f1aa2cc7b, server=97f25515df69,38585,1732754368938 in 165 msec 2024-11-28T00:43:48,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=422ff34824aa47c48e470ec07e329858, UNASSIGN in 170 msec 2024-11-28T00:43:48,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-11-28T00:43:48,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ac3c7155f593d6e22f31043f1aa2cc7b, UNASSIGN in 172 msec 2024-11-28T00:43:48,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-28T00:43:48,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 176 msec 2024-11-28T00:43:48,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754628349"}]},"ts":"1732754628349"} 2024-11-28T00:43:48,351 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-28T00:43:48,351 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-28T00:43:48,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 186 msec 2024-11-28T00:43:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-28T00:43:48,489 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:43:48,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,494 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,495 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,497 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,500 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:48,500 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:48,501 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/recovered.edits] 2024-11-28T00:43:48,501 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/recovered.edits] 2024-11-28T00:43:48,505 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57 2024-11-28T00:43:48,505 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78 to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/cf/0db85993e54b4c19aaba18846d88ad78 2024-11-28T00:43:48,507 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/recovered.edits/9.seqid 2024-11-28T00:43:48,508 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b/recovered.edits/9.seqid 2024-11-28T00:43:48,508 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:48,508 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithMergeRegion/ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:48,508 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-28T00:43:48,509 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-28T00:43:48,509 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-28T00:43:48,512 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411282fabfaae6497401d8521757d4e4d2669_422ff34824aa47c48e470ec07e329858 2024-11-28T00:43:48,513 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241128cb351afcce3a474b9d311675db76fa87_ac3c7155f593d6e22f31043f1aa2cc7b 2024-11-28T00:43:48,514 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-28T00:43:48,515 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,518 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-28T00:43:48,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,531 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:43:48,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:43:48,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:43:48,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:43:48,533 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 
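
The HFileArchiver entries above show that, before a region directory is deleted, its store files and recovered.edits are moved from the table's data directory to a mirrored location under archive/ rather than being removed outright. The sketch below illustrates that path mapping with the Hadoop FileSystem API; ArchivePathSketch and toArchivePath are hypothetical names used only for illustration, not HFileArchiver's actual implementation, and the concrete paths are copied from the entries above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Hypothetical helper: maps a file under <root>/data/... to the mirrored
  // location under <root>/archive/data/..., the layout visible in the log.
  static Path toArchivePath(Path root, Path storeFile) {
    String dataPrefix = new Path(root, "data").toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
    return new Path(new Path(root, "archive/data"), relative);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root dir and store file taken from the archiver entries above.
    Path root = new Path("hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e");
    Path storeFile = new Path(root,
        "data/default/testtb-testExportFileSystemStateWithMergeRegion/422ff34824aa47c48e470ec07e329858/cf/3b910f14b057459ba36e40f61cba6d57");
    Path archived = toArchivePath(root, storeFile);

    FileSystem fs = FileSystem.get(URI.create(root.toString()), conf);
    fs.mkdirs(archived.getParent());            // create the mirrored archive directory
    boolean moved = fs.rename(storeFile, archived); // move instead of delete, as the archiver does
    System.out.println("archived=" + moved + " -> " + archived);
  }
}

Keeping the relative layout identical under archive/ is what lets snapshot references continue to resolve these files after the table itself has been deleted.
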
2024-11-28T00:43:48,534 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,534 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-28T00:43:48,535 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754628534"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:48,535 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754628534"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:48,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:43:48,537 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ac3c7155f593d6e22f31043f1aa2cc7b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732754606506.ac3c7155f593d6e22f31043f1aa2cc7b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 422ff34824aa47c48e470ec07e329858, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732754606506.422ff34824aa47c48e470ec07e329858.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:43:48,537 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-11-28T00:43:48,537 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754628537"}]},"ts":"9223372036854775807"} 2024-11-28T00:43:48,539 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-28T00:43:48,539 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 48 msec 2024-11-28T00:43:48,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,541 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,542 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-28T00:43:48,542 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,543 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:43:48,548 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-28T00:43:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,551 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-28T00:43:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:43:48,553 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-28T00:43:48,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:43:48,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=816 (was 803) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6318 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:38917 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:46587 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1619510192_1 at /127.0.0.1:59926 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:42418 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38917 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:35415 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1619510192_1 at /127.0.0.1:42380 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:54042 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:59942 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 143278) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 798) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=684 (was 708), ProcessCount=24 (was 25), AvailableMemoryMB=2073 (was 2061) - AvailableMemoryMB LEAK? 
- 2024-11-28T00:43:48,577 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-11-28T00:43:48,595 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=814, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=684, ProcessCount=24, AvailableMemoryMB=2073 2024-11-28T00:43:48,595 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-11-28T00:43:48,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:43:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:43:48,597 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:43:48,598 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-11-28T00:43:48,598 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:43:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-28T00:43:48,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:43:48,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:43:48,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:43:48,606 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da45016b67794bb77005d2e661c58149, NAME => 'testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:48,606 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 639a0f77d3533d45d64c08f1929affce, NAME => 'testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:43:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:43:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:43:48,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:43:48,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:43:48,613 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:48,613 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing da45016b67794bb77005d2e661c58149, disabling compactions & flushes 2024-11-28T00:43:48,613 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,613 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
after waiting 0 ms 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,614 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for da45016b67794bb77005d2e661c58149: Waiting for close lock at 1732754628613Disabling compacts and flushes for region at 1732754628613Disabling writes for close at 1732754628614 (+1 ms)Writing region close event to WAL at 1732754628614Closed at 1732754628614 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 639a0f77d3533d45d64c08f1929affce, disabling compactions & flushes 2024-11-28T00:43:48,614 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. after waiting 0 ms 2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,614 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 
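[editor's sketch] The create-table entries above show the master building 'testtb-testExportExpiredSnapshot' with a single MOB-enabled column family ('cf', IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at row key '1'. For reference, a minimal client-side sketch that would produce an equivalent descriptor, assuming the standard HBase 2.x/3.x Admin API; the class name and configuration source are illustrative, not taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {                 // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Column family matching the descriptor logged above:
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1);
      TableDescriptorBuilder td =
          TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf.build());
      // A single split key '1' yields the two regions seen in RegionOpenAndInit:
      // ('' -> '1') and ('1' -> '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}

A MOB threshold of 0 routes effectively every cell value into MOB files, which is why the MOB export tests configure the family this way.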
2024-11-28T00:43:48,614 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 639a0f77d3533d45d64c08f1929affce: Waiting for close lock at 1732754628614Disabling compacts and flushes for region at 1732754628614Disabling writes for close at 1732754628614Writing region close event to WAL at 1732754628614Closed at 1732754628614 2024-11-28T00:43:48,615 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:43:48,616 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732754628615"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754628615"}]},"ts":"1732754628615"} 2024-11-28T00:43:48,616 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732754628615"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754628615"}]},"ts":"1732754628615"} 2024-11-28T00:43:48,618 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:43:48,619 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:43:48,619 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754628619"}]},"ts":"1732754628619"} 2024-11-28T00:43:48,620 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-28T00:43:48,621 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:43:48,622 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:43:48,622 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:43:48,622 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:43:48,622 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:43:48,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, ASSIGN}] 2024-11-28T00:43:48,624 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, ASSIGN 2024-11-28T00:43:48,624 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, ASSIGN 2024-11-28T00:43:48,625 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:43:48,625 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:43:48,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-28T00:43:48,775 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
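[editor's sketch] CREATE_TABLE_ASSIGN_REGIONS spawns the two TransitRegionStateProcedure children (pids 170/171) above and the balancer places them round-robin. A client can confirm where the regions landed through a RegionLocator; a minimal sketch assuming an already-open Connection (helper name is illustrative):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class PrintAssignments {               // illustrative helper
  static void printAssignments(Connection conn) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        // Prints the encoded region name, its key range, and the hosting server,
        // e.g. da45016b67794bb77005d2e661c58149 ['', '1') on 97f25515df69,43795,...
        System.out.println(loc.getRegion().getEncodedName()
            + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ") -> "
            + loc.getServerName());
      }
    }
  }
}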
2024-11-28T00:43:48,776 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=639a0f77d3533d45d64c08f1929affce, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:43:48,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=da45016b67794bb77005d2e661c58149, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:48,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, ASSIGN because future has completed 2024-11-28T00:43:48,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:43:48,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, ASSIGN because future has completed 2024-11-28T00:43:48,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:48,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-28T00:43:48,940 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,940 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,941 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 639a0f77d3533d45d64c08f1929affce, NAME => 'testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:43:48,941 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => da45016b67794bb77005d2e661c58149, NAME => 'testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:43:48,941 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. service=AccessControlService 2024-11-28T00:43:48,941 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
service=AccessControlService 2024-11-28T00:43:48,942 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:43:48,942 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:43:48,942 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,942 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,942 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:48,942 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:48,943 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,943 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,943 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,943 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,944 INFO [StoreOpener-da45016b67794bb77005d2e661c58149-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,945 INFO [StoreOpener-639a0f77d3533d45d64c08f1929affce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,946 INFO [StoreOpener-da45016b67794bb77005d2e661c58149-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da45016b67794bb77005d2e661c58149 columnFamilyName cf 2024-11-28T00:43:48,946 INFO [StoreOpener-639a0f77d3533d45d64c08f1929affce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 639a0f77d3533d45d64c08f1929affce columnFamilyName cf 2024-11-28T00:43:48,946 DEBUG [StoreOpener-da45016b67794bb77005d2e661c58149-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:48,946 DEBUG [StoreOpener-639a0f77d3533d45d64c08f1929affce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:48,947 INFO [StoreOpener-da45016b67794bb77005d2e661c58149-1 {}] regionserver.HStore(327): Store=da45016b67794bb77005d2e661c58149/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:48,947 INFO [StoreOpener-639a0f77d3533d45d64c08f1929affce-1 {}] regionserver.HStore(327): Store=639a0f77d3533d45d64c08f1929affce/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:48,947 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,947 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,948 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,948 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,948 DEBUG 
[RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,948 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,949 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,949 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,949 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,949 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,950 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,950 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:48,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:48,952 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 639a0f77d3533d45d64c08f1929affce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75306944, jitterRate=0.1221609115600586}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:48,952 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened da45016b67794bb77005d2e661c58149; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68508895, jitterRate=0.020862087607383728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:48,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:48,952 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da45016b67794bb77005d2e661c58149 2024-11-28T00:43:48,953 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 639a0f77d3533d45d64c08f1929affce: Running coprocessor pre-open hook at 1732754628943Writing region info on filesystem at 1732754628943Initializing all the Stores at 1732754628944 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754628944Cleaning up temporary data from old regions at 1732754628949 (+5 ms)Running coprocessor post-open hooks at 1732754628952 (+3 ms)Region opened successfully at 1732754628953 (+1 ms) 2024-11-28T00:43:48,953 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for da45016b67794bb77005d2e661c58149: Running coprocessor pre-open hook at 1732754628943Writing region info on filesystem at 1732754628943Initializing all the Stores at 1732754628944 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754628944Cleaning up temporary data from old regions at 1732754628949 (+5 ms)Running coprocessor post-open hooks at 1732754628952 (+3 ms)Region opened successfully at 1732754628953 (+1 ms) 2024-11-28T00:43:48,954 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce., pid=172, masterSystemTime=1732754628933 2024-11-28T00:43:48,954 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149., pid=173, masterSystemTime=1732754628935 2024-11-28T00:43:48,955 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:48,955 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
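[editor's sketch] The StoreOpener entries above instantiate DefaultStoreFileTracker because the table carries the 'hbase.store.file-tracker.impl' => 'DEFAULT' attribute in its descriptor. A hedged sketch of setting that attribute explicitly when building a descriptor; the attribute key is taken from the log, the rest assumes the standard builder API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class StoreFileTrackerAttribute {      // illustrative helper
  static TableDescriptor withDefaultTracker() {
    // Pin the store file tracker in table metadata, mirroring
    // TABLE_ATTRIBUTES => { METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'} }.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();
  }
}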
2024-11-28T00:43:48,956 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=da45016b67794bb77005d2e661c58149, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:48,956 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,956 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:48,956 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=639a0f77d3533d45d64c08f1929affce, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:43:48,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:48,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:43:48,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-28T00:43:48,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084 in 176 msec 2024-11-28T00:43:48,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-28T00:43:48,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169 in 179 msec 2024-11-28T00:43:48,961 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, ASSIGN in 337 msec 2024-11-28T00:43:48,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-11-28T00:43:48,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, ASSIGN in 338 msec 2024-11-28T00:43:48,962 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:43:48,962 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754628962"}]},"ts":"1732754628962"} 
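[editor's sketch] The repeated "Checking to see if procedure is done pid=169" lines are the master answering the client's completion polls for the CreateTableProcedure. With the asynchronous Admin API that polling sits behind a Future; a minimal sketch of the client-side wait, with the descriptor and split keys assumed to be built as in the earlier sketch rather than taken from the log:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class WaitForCreate {                  // illustrative helper
  static void createAndWait(Admin admin, TableDescriptor td, byte[][] splitKeys)
      throws Exception {
    // createTableAsync completes once the CreateTableProcedure reaches SUCCESS;
    // the synchronous createTable call blocks through the same polling path.
    Future<Void> create = admin.createTableAsync(td, splitKeys);
    create.get(60, TimeUnit.SECONDS);
    TableName table = td.getTableName();
    // By CREATE_TABLE_UPDATE_DESC_CACHE the table is marked ENABLED in hbase:meta.
    if (!admin.isTableEnabled(table) || !admin.isTableAvailable(table)) {
      throw new IllegalStateException("table not ready: " + table);
    }
  }
}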
2024-11-28T00:43:48,964 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-28T00:43:48,964 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:43:48,964 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-28T00:43:48,967 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:43:49,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-28T00:43:49,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:49,264 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:49,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:49,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:49,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:49,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:49,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:49,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:49,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 892 msec 2024-11-28T00:43:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-28T00:43:49,739 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:43:49,739 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:49,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-28T00:43:49,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:49,745 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:49,749 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:49,757 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:49,761 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:49,763 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:43:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754629763 (current time:1732754629763). 
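[editor's sketch] The snapshot request logged above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) corresponds to a client-side Admin.snapshot call. A minimal sketch, assuming the standard SnapshotDescription/SnapshotType client API (helper name illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeFlushSnapshot {              // illustrative helper
  static void takeSnapshot(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    // FLUSH-type snapshot of an online table, matching "type=FLUSH" in the request above;
    // ttl=0 in the log means no explicit TTL, so none is set here.
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportExpiredSnapshot", table, SnapshotType.FLUSH));
  }
}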
2024-11-28T00:43:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:43:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:43:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20e9a199, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:49,765 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@603b3ce6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:49,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,766 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:49,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54d8707a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:49,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:49,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:49,768 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:49,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,770 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
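[editor's sketch] The call stack above comes from SnapshotDescriptionUtils.isSecurityAvailable: the master briefly opens a connection to check whether access control is active before attaching ACLs to the snapshot. From the client side the equivalent question can be asked through the security-capabilities API; a minimal sketch under that assumption:

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.security.SecurityCapability;

public final class CheckAuthorization {             // illustrative helper
  static boolean authorizationEnabled(Admin admin) throws Exception {
    // True when the AccessController coprocessor (backing hbase:acl) is active,
    // which is what makes the master embed table ACLs in the snapshot description.
    List<SecurityCapability> caps = admin.getSecurityCapabilities();
    return caps.contains(SecurityCapability.AUTHORIZATION);
  }
}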
2024-11-28T00:43:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e1783e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:49,771 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:49,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:49,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:49,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b1e3149, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:49,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:49,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,773 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51032, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:49,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41becc7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:49,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:49,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:49,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34958, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:43:49,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:49,778 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:49,778 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
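Note: the call stack above bottoms out in PermissionStorage.getTablePermissions, i.e. the master reads the hbase:acl row for the table so the grants can be written into the snapshot description (the "Read acl: entry[...], kv [jenkins: RWXCA]" line that follows). A hedged sketch of fetching roughly the same information from a client via AccessControlClient is shown below; it assumes the AccessController coprocessor is enabled, as it is in this test setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Table name taken from the log; prints entries comparable to "kv [jenkins: RWXCA]".
          for (UserPermission p :
              AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot")) {
            System.out.println(p);
          }
        }
      }
    }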
2024-11-28T00:43:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:43:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:43:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:43:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-28T00:43:49,780 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-28T00:43:49,781 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:49,783 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:43:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:43:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:43:49,789 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:49,789 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce}] 2024-11-28T00:43:49,790 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:49,790 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 2024-11-28T00:43:49,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-28T00:43:49,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-11-28T00:43:49,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-11-28T00:43:49,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:49,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:49,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for da45016b67794bb77005d2e661c58149: 2024-11-28T00:43:49,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 639a0f77d3533d45d64c08f1929affce: 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:43:49,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:43:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:43:49,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:43:49,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:43:49,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:43:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:43:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:43:49,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:49,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-11-28T00:43:49,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 
2024-11-28T00:43:49,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-28T00:43:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-11-28T00:43:49,959 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region da45016b67794bb77005d2e661c58149 2024-11-28T00:43:49,959 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 2024-11-28T00:43:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-11-28T00:43:49,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:49,959 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:49,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 in 171 msec 2024-11-28T00:43:49,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-11-28T00:43:49,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce in 171 msec 2024-11-28T00:43:49,962 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:49,963 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:43:49,964 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
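Note: the SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries below exist because the test table's "cf" family is MOB-enabled, so large cell values live in a separate mob region that the snapshot procedure captures in addition to the online regions. A sketch of how such a family is typically declared is shown below; the threshold value is illustrative and not taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // cells above the threshold are written as MOB files
                  .setMobThreshold(3L)   // illustrative threshold in bytes, not from the log
                  .build())
              .build());
        }
      }
    }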
2024-11-28T00:43:49,964 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:43:49,964 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:49,964 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:43:49,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:43:49,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:43:49,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:43:49,970 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:49,970 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:43:49,970 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:43:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:43:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:43:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:43:49,981 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:49,985 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:49,985 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:43:49,987 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:49,987 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-28T00:43:49,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 208 msec 2024-11-28T00:43:50,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-28T00:43:50,099 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:43:50,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-28T00:43:50,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:50,113 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:50,114 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:50,116 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-28T00:43:50,117 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
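Note: the "writing data to region ... with WAL disabled" warnings above come from the test loading rows with the write-ahead log skipped. A client opts into this per mutation via Durability.SKIP_WAL; below is a minimal sketch under the assumption of a single-column put (the row key and value are placeholders, not taken from the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"))  // placeholder row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);    // skip the write-ahead log, hence the warning
          table.put(put);
        }
      }
    }

Skipping the WAL trades crash safety for write throughput, which is acceptable here only because the data is immediately captured by a flush snapshot.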
2024-11-28T00:43:50,117 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:50,118 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:50,121 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:50,125 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:50,128 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754630128 (current time:1732754630128). 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3834b7c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:50,129 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:50,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:50,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:50,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46375e7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-28T00:43:50,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:50,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:50,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,130 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51038, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:50,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@277ee8c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:50,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:50,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:50,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:50,132 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34970, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:50,133 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:43:50,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:50,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,134 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:50,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dda7ee0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:50,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:50,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:50,135 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:50,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:50,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:50,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f3b0c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:50,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:50,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:50,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,136 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51068, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:50,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3515eb68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:50,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:50,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:50,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:50,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:50,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:50,142 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
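Note: the "fetched meta region location" and "fetched location of 'hbase:acl'" entries above are ordinary region-location lookups; from application code the same lookup is exposed through RegionLocator. A minimal sketch follows, with the row key mirroring the one in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateAclRow {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          HRegionLocation loc =
              locator.getRegionLocation(Bytes.toBytes("testtb-testExportExpiredSnapshot"));
          // Prints region name, host and seqNum, comparable to the AsyncNonMetaRegionLocator line.
          System.out.println(loc);
        }
      }
    }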
2024-11-28T00:43:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:50,142 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:43:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:43:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
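Note: the repeated "Checking to see if procedure is done pid=..." entries are the client polling the master until the stored SnapshotProcedure completes. With the asynchronous Admin API the same wait can be expressed as a Future; this is a sketch under the assumption that the three-argument SnapshotDescription constructor and Admin.snapshotAsync are available in this client version, using the snaptb0 snapshot created below.

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class AsyncSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          SnapshotDescription desc = new SnapshotDescription(
              "snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
          Future<Void> done = admin.snapshotAsync(desc); // master stores and runs the procedure
          done.get();                                    // blocks until the procedure succeeds
        }
      }
    }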
2024-11-28T00:43:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:43:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-28T00:43:50,145 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:50,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:43:50,147 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:50,149 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:50,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:43:50,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:43:50,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:43:50,157 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:50,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce}] 2024-11-28T00:43:50,158 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:50,158 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 2024-11-28T00:43:50,248 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:43:50,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-11-28T00:43:50,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-11-28T00:43:50,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:43:50,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:43:50,310 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing da45016b67794bb77005d2e661c58149 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-28T00:43:50,310 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 639a0f77d3533d45d64c08f1929affce 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-28T00:43:50,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 is 71, key is 005ea437ac7b26dbfbc6c2e76a5a47b5/cf:q/1732754630110/Put/seqid=0 2024-11-28T00:43:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742251_1427 (size=5241) 2024-11-28T00:43:50,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742251_1427 (size=5241) 2024-11-28T00:43:50,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742251_1427 (size=5241) 2024-11-28T00:43:50,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:50,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce is 71, key is 11f88825136041cc078678c683566341/cf:q/1732754630113/Put/seqid=0 2024-11-28T00:43:50,335 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 2024-11-28T00:43:50,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742252_1428 (size=8031) 2024-11-28T00:43:50,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742252_1428 (size=8031) 2024-11-28T00:43:50,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/.tmp/cf/ed1bb3b961fa48f2b03459dd2de0e2db, store: [table=testtb-testExportExpiredSnapshot family=cf region=da45016b67794bb77005d2e661c58149] 2024-11-28T00:43:50,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742252_1428 (size=8031) 2024-11-28T00:43:50,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:50,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/.tmp/cf/ed1bb3b961fa48f2b03459dd2de0e2db is 209, key is 00fbccb29319eb315c2264adae695f8f1/cf:q/1732754630110/Put/seqid=0 2024-11-28T00:43:50,341 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:50,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/.tmp/cf/65ba250b2c1e44328dca2b43ad3b2f35, store: [table=testtb-testExportExpiredSnapshot family=cf region=639a0f77d3533d45d64c08f1929affce] 2024-11-28T00:43:50,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/.tmp/cf/65ba250b2c1e44328dca2b43ad3b2f35 is 209, key is 1be223b19d431356969e89b43a04711ac/cf:q/1732754630113/Put/seqid=0 2024-11-28T00:43:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742253_1429 (size=6326) 2024-11-28T00:43:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742253_1429 (size=6326) 2024-11-28T00:43:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742253_1429 (size=6326) 2024-11-28T00:43:50,346 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/.tmp/cf/ed1bb3b961fa48f2b03459dd2de0e2db 2024-11-28T00:43:50,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742254_1430 (size=14587) 2024-11-28T00:43:50,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742254_1430 (size=14587) 2024-11-28T00:43:50,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742254_1430 (size=14587) 2024-11-28T00:43:50,349 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/.tmp/cf/65ba250b2c1e44328dca2b43ad3b2f35 2024-11-28T00:43:50,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/.tmp/cf/ed1bb3b961fa48f2b03459dd2de0e2db as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db 2024-11-28T00:43:50,354 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/.tmp/cf/65ba250b2c1e44328dca2b43ad3b2f35 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35 2024-11-28T00:43:50,357 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db, entries=5, sequenceid=6, filesize=6.2 K 2024-11-28T00:43:50,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for da45016b67794bb77005d2e661c58149 in 48ms, sequenceid=6, compaction requested=false 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for da45016b67794bb77005d2e661c58149: 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. for snaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db] hfiles 2024-11-28T00:43:50,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,359 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35, entries=45, sequenceid=6, filesize=14.2 K 2024-11-28T00:43:50,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 639a0f77d3533d45d64c08f1929affce in 50ms, sequenceid=6, compaction requested=false 2024-11-28T00:43:50,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 639a0f77d3533d45d64c08f1929affce: 2024-11-28T00:43:50,360 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. for snaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:43:50,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:50,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35] hfiles 2024-11-28T00:43:50,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:43:50,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:43:50,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:43:50,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
2024-11-28T00:43:50,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-28T00:43:50,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-11-28T00:43:50,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region da45016b67794bb77005d2e661c58149 2024-11-28T00:43:50,365 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 2024-11-28T00:43:50,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure da45016b67794bb77005d2e661c58149 in 209 msec 2024-11-28T00:43:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:43:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:43:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:43:50,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 
2024-11-28T00:43:50,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-11-28T00:43:50,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-11-28T00:43:50,371 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:50,371 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:50,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-28T00:43:50,373 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:50,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 639a0f77d3533d45d64c08f1929affce in 214 msec 2024-11-28T00:43:50,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:43:50,375 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
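Editor's note: the SnapshotRegionProcedure and SnapshotManifest records above are the server-side half of an ordinary Admin snapshot request against the MOB table. A minimal client-side sketch is below, assuming the default client configuration on the classpath; it mirrors the table and snapshot names seen in this log but is not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush-type snapshot: the master flushes each region (the DefaultMobStoreFlusher
          // records above) and then builds a manifest of hfile references.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }

The per-region flushes and the "Adding reference for file" manifest entries above all happen inside this single blocking call.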
2024-11-28T00:43:50,375 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:43:50,375 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:50,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149] hfiles 2024-11-28T00:43:50,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce 2024-11-28T00:43:50,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 2024-11-28T00:43:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:43:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:43:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:43:50,382 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:50,382 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,383 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:43:50,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:43:50,389 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:43:50,393 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:50,397 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:50,397 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-28T00:43:50,399 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:50,399 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-28T00:43:50,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 256 msec 2024-11-28T00:43:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:43:50,458 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:43:50,459 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:43:50,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-28T00:43:50,461 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:43:50,462 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-11-28T00:43:50,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:43:50,463 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:43:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:43:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:43:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:43:50,471 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a074d787aab5a43a48b3164bc3a7256f, NAME => 'testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:50,471 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => af0adc78f2023f6d163d04caaa3516c7, NAME => 'testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:43:50,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:43:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:43:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45777 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:43:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:43:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:43:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:43:50,477 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:50,477 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing a074d787aab5a43a48b3164bc3a7256f, disabling compactions & flushes 2024-11-28T00:43:50,477 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,477 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,477 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. after waiting 0 ms 2024-11-28T00:43:50,477 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,478 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for a074d787aab5a43a48b3164bc3a7256f: Waiting for close lock at 1732754630477Disabling compacts and flushes for region at 1732754630477Disabling writes for close at 1732754630477Writing region close event to WAL at 1732754630478 (+1 ms)Closed at 1732754630478 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing af0adc78f2023f6d163d04caaa3516c7, disabling compactions & flushes 2024-11-28T00:43:50,478 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 
2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. after waiting 0 ms 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:50,478 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:50,478 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for af0adc78f2023f6d163d04caaa3516c7: Waiting for close lock at 1732754630478Disabling compacts and flushes for region at 1732754630478Disabling writes for close at 1732754630478Writing region close event to WAL at 1732754630478Closed at 1732754630478 2024-11-28T00:43:50,479 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:43:50,479 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732754630479"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754630479"}]},"ts":"1732754630479"} 2024-11-28T00:43:50,479 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732754630479"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754630479"}]},"ts":"1732754630479"} 2024-11-28T00:43:50,481 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
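Editor's note: the CreateTableProcedure records above carry the full descriptor for 'testExportExpiredSnapshot' (a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and two regions split at '1'). A rough client-side equivalent using the Java Admin API is sketched below; the builder calls are standard, but the mapping from the descriptor string is a hedged reading of the log, not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      // Assumes an already-open Admin handle (see the snapshot sketch earlier).
      static void create(Admin admin) throws Exception {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportExpiredSnapshot"));
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)        // IS_MOB => 'true'
            .setMobThreshold(0L)        // MOB_THRESHOLD => '0': every cell is stored as a MOB
            .setMaxVersions(1)          // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)
            .build());
        // A single split key '1' yields the two regions seen above: ['', '1') and ['1', '').
        admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
      }
    }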
2024-11-28T00:43:50,481 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:43:50,482 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754630481"}]},"ts":"1732754630481"} 2024-11-28T00:43:50,483 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-28T00:43:50,483 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:43:50,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:43:50,484 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:43:50,484 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:43:50,484 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:43:50,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:43:50,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a074d787aab5a43a48b3164bc3a7256f, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=af0adc78f2023f6d163d04caaa3516c7, ASSIGN}] 2024-11-28T00:43:50,486 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=af0adc78f2023f6d163d04caaa3516c7, ASSIGN 2024-11-28T00:43:50,486 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a074d787aab5a43a48b3164bc3a7256f, ASSIGN 2024-11-28T00:43:50,487 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a074d787aab5a43a48b3164bc3a7256f, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:43:50,487 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=af0adc78f2023f6d163d04caaa3516c7, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:43:50,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:43:50,637 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:43:50,637 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=af0adc78f2023f6d163d04caaa3516c7, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:43:50,637 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=a074d787aab5a43a48b3164bc3a7256f, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:50,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=af0adc78f2023f6d163d04caaa3516c7, ASSIGN because future has completed 2024-11-28T00:43:50,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure af0adc78f2023f6d163d04caaa3516c7, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:43:50,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a074d787aab5a43a48b3164bc3a7256f, ASSIGN because future has completed 2024-11-28T00:43:50,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure a074d787aab5a43a48b3164bc3a7256f, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:43:50,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:43:50,799 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,799 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => a074d787aab5a43a48b3164bc3a7256f, NAME => 'testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:43:50,799 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. service=AccessControlService 2024-11-28T00:43:50,799 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
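Editor's note: the two TransitRegionStateProcedure ... ASSIGN entries and the balancer output above move the new regions from OFFLINE to OPEN on the chosen region servers. From the client side this phase is normally just a wait; one way to confirm the outcome is sketched below, assuming an open Connection (the class and method names are hypothetical).

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WaitForAssignment {
      static void check(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testExportExpiredSnapshot");
        try (Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);                     // regions still transitioning
          }
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // Should print the two encoded region names assigned above with their servers.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }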
2024-11-28T00:43:50,799 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => af0adc78f2023f6d163d04caaa3516c7, NAME => 'testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. service=AccessControlService 2024-11-28T00:43:50,800 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,800 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,801 INFO [StoreOpener-a074d787aab5a43a48b3164bc3a7256f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,802 INFO [StoreOpener-af0adc78f2023f6d163d04caaa3516c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,803 INFO [StoreOpener-a074d787aab5a43a48b3164bc3a7256f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a074d787aab5a43a48b3164bc3a7256f columnFamilyName cf 2024-11-28T00:43:50,803 INFO [StoreOpener-af0adc78f2023f6d163d04caaa3516c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af0adc78f2023f6d163d04caaa3516c7 columnFamilyName cf 2024-11-28T00:43:50,803 DEBUG [StoreOpener-a074d787aab5a43a48b3164bc3a7256f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:50,803 DEBUG [StoreOpener-af0adc78f2023f6d163d04caaa3516c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:50,804 INFO [StoreOpener-af0adc78f2023f6d163d04caaa3516c7-1 {}] regionserver.HStore(327): Store=af0adc78f2023f6d163d04caaa3516c7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:50,804 INFO [StoreOpener-a074d787aab5a43a48b3164bc3a7256f-1 {}] regionserver.HStore(327): Store=a074d787aab5a43a48b3164bc3a7256f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:43:50,804 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,804 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,805 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,806 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,807 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,808 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:50,808 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:43:50,808 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened af0adc78f2023f6d163d04caaa3516c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59017012, jitterRate=-0.12057799100875854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:50,808 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:50,809 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened a074d787aab5a43a48b3164bc3a7256f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63642987, jitterRate=-0.05164559185504913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:43:50,809 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:50,809 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for a074d787aab5a43a48b3164bc3a7256f: Running coprocessor pre-open hook at 1732754630800Writing region info on filesystem at 1732754630800Initializing all the Stores at 1732754630801 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754630801Cleaning up temporary data from old regions at 1732754630805 (+4 ms)Running coprocessor post-open hooks at 1732754630809 (+4 ms)Region opened successfully at 1732754630809 2024-11-28T00:43:50,809 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for af0adc78f2023f6d163d04caaa3516c7: Running coprocessor pre-open hook at 1732754630800Writing region info on filesystem at 
1732754630801 (+1 ms)Initializing all the Stores at 1732754630801Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754630801Cleaning up temporary data from old regions at 1732754630805 (+4 ms)Running coprocessor post-open hooks at 1732754630808 (+3 ms)Region opened successfully at 1732754630809 (+1 ms) 2024-11-28T00:43:50,809 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f., pid=184, masterSystemTime=1732754630791 2024-11-28T00:43:50,809 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7., pid=183, masterSystemTime=1732754630791 2024-11-28T00:43:50,811 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,811 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:50,811 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=a074d787aab5a43a48b3164bc3a7256f, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:43:50,811 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:50,811 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 
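Editor's note: the CompactionConfiguration line printed above while each 'cf' store was opened (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms, jitter 0.5) is just the effective defaults rendered back. The usual configuration keys behind those numbers are shown below; the mapping uses the standard hbase-site.xml key names and is an inference from the log, not this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      static Configuration tune() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);     // selection ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period: 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
        return conf;
      }
    }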
2024-11-28T00:43:50,812 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=af0adc78f2023f6d163d04caaa3516c7, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:43:50,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure a074d787aab5a43a48b3164bc3a7256f, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:43:50,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure af0adc78f2023f6d163d04caaa3516c7, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:43:50,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-11-28T00:43:50,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure a074d787aab5a43a48b3164bc3a7256f, server=97f25515df69,43795,1732754369084 in 173 msec 2024-11-28T00:43:50,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-11-28T00:43:50,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a074d787aab5a43a48b3164bc3a7256f, ASSIGN in 330 msec 2024-11-28T00:43:50,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure af0adc78f2023f6d163d04caaa3516c7, server=97f25515df69,34347,1732754369169 in 175 msec 2024-11-28T00:43:50,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-28T00:43:50,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=af0adc78f2023f6d163d04caaa3516c7, ASSIGN in 331 msec 2024-11-28T00:43:50,818 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:43:50,818 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754630818"}]},"ts":"1732754630818"} 2024-11-28T00:43:50,819 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-28T00:43:50,820 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:43:50,820 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-28T00:43:50,822 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:43:50,834 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:50,834 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:50,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:50,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:43:50,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:43:50,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 427 msec 2024-11-28T00:43:51,088 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:43:51,088 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-28T00:43:51,088 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-28T00:43:51,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:51,091 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:51,094 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,100 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,106 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:51,116 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:43:51,118 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-28T00:43:51,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 
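Editor's note: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above correspond to puts issued with durability lowered to skip the write-ahead log, which the test can afford because the rows are about to be captured by a snapshot anyway. A minimal sketch of such a write follows; the row key and value are hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      static void write(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                          // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);  // produces the "WAL disabled" warning seen above
          table.put(put);
        }
      }
    }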
2024-11-28T00:43:51,121 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:43:51,122 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,127 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:43:51,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-28T00:43:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:43:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:43:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c71d0d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:51,136 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:51,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:51,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:51,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@697093b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:51,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:51,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,137 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:51,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@151a8e90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:51,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:51,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:51,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:43:51,141 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:51,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:51,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,141 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
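The snapshot request handled above carries type=FLUSH and ttl=10 seconds. Below is a minimal client-side sketch of issuing such a request; it assumes a recent HBase client where SnapshotDescription accepts a snapshot-properties map and the master reads the TTL (in seconds) from a "TTL" key, both of which are assumptions here, with the table and snapshot names copied from the log.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotWithTtlExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Assumption: a "TTL" snapshot property (seconds) is what produces the ttl=10 seen in the log.
          Map<String, Object> props = new HashMap<>();
          props.put("TTL", 10L);
          // FLUSH-type snapshot: regions flush their memstores before file references are taken.
          admin.snapshot(new SnapshotDescription("snapshot-testExportExpiredSnapshot",
              TableName.valueOf("testExportExpiredSnapshot"), SnapshotType.FLUSH, props));
        }
      }
    }

With a 10-second TTL the snapshot counts as expired almost immediately, which is exactly what the export step later in this log runs into.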
2024-11-28T00:43:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d0714e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:43:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:43:51,143 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a1df1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:43:51,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,144 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:43:51,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d3ab3f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:43:51,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:43:51,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:43:51,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:43:51,147 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:43:51,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:43:51,150 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:43:51,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:43:51,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:43:51,150 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
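The call stack above ends in PermissionStorage.getTablePermissions: before taking the snapshot, the master reads the table's grants from hbase:acl so they can be written into the snapshot description. The same grants (the [jenkins: RWXCA] entry logged just below) can be listed from a client through the public AccessControlClient API; a short sketch, assuming the AccessController coprocessor is enabled as it is in this test:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ListTableAclExample {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // getUserPermissions takes a table-name regex and returns the grants stored in hbase:acl.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testExportExpiredSnapshot");
          for (UserPermission p : perms) {
            System.out.println(p); // expect something like jenkins with RWXCA on this table
          }
        }
      }
    }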
2024-11-28T00:43:51,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:43:51,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:43:51,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-28T00:43:51,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-28T00:43:51,154 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:43:51,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-28T00:43:51,155 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:43:51,157 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:43:51,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:43:51,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:43:51,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:43:51,164 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:43:51,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a074d787aab5a43a48b3164bc3a7256f}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0adc78f2023f6d163d04caaa3516c7}] 2024-11-28T00:43:51,165 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:51,165 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-28T00:43:51,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-11-28T00:43:51,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-11-28T00:43:51,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:51,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:51,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing a074d787aab5a43a48b3164bc3a7256f 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-28T00:43:51,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing af0adc78f2023f6d163d04caaa3516c7 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-28T00:43:51,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b4ff29eaeee14c5ba029624f144273cd_a074d787aab5a43a48b3164bc3a7256f is 71, key is 011f4c3b57c478cc712bee1cf2fbe3fb/cf:q/1732754631114/Put/seqid=0 2024-11-28T00:43:51,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112840d36140036043f9851f9cb5be2da7b8_af0adc78f2023f6d163d04caaa3516c7 is 71, key is 13de03578160e3a7b4fe5c31b724fe07/cf:q/1732754631116/Put/seqid=0 2024-11-28T00:43:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742263_1439 (size=5171) 2024-11-28T00:43:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742263_1439 (size=5171) 2024-11-28T00:43:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742263_1439 (size=5171) 2024-11-28T00:43:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added 
to blk_1073742264_1440 (size=8102) 2024-11-28T00:43:51,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:51,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742264_1440 (size=8102) 2024-11-28T00:43:51,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742264_1440 (size=8102) 2024-11-28T00:43:51,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:51,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112840d36140036043f9851f9cb5be2da7b8_af0adc78f2023f6d163d04caaa3516c7 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112840d36140036043f9851f9cb5be2da7b8_af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:51,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b4ff29eaeee14c5ba029624f144273cd_a074d787aab5a43a48b3164bc3a7256f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128b4ff29eaeee14c5ba029624f144273cd_a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:51,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/.tmp/cf/263263ef1345403cb37227246cccccb3, store: [table=testExportExpiredSnapshot family=cf region=af0adc78f2023f6d163d04caaa3516c7] 2024-11-28T00:43:51,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/.tmp/cf/2feb4d12a2444b1cba76b28c9eaaeff1, store: [table=testExportExpiredSnapshot family=cf region=a074d787aab5a43a48b3164bc3a7256f] 2024-11-28T00:43:51,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/.tmp/cf/263263ef1345403cb37227246cccccb3 is 202, key is 12326318121589c50b2e9869a917f8dda/cf:q/1732754631116/Put/seqid=0 2024-11-28T00:43:51,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/.tmp/cf/2feb4d12a2444b1cba76b28c9eaaeff1 is 202, key is 0b1ff25c4f44453113ad118dfc6f1429a/cf:q/1732754631114/Put/seqid=0 2024-11-28T00:43:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742266_1442 (size=14465) 2024-11-28T00:43:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742265_1441 (size=6086) 2024-11-28T00:43:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742265_1441 (size=6086) 2024-11-28T00:43:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742265_1441 (size=6086) 2024-11-28T00:43:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742266_1442 (size=14465) 2024-11-28T00:43:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742266_1442 (size=14465) 2024-11-28T00:43:51,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/.tmp/cf/2feb4d12a2444b1cba76b28c9eaaeff1 2024-11-28T00:43:51,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/.tmp/cf/263263ef1345403cb37227246cccccb3 2024-11-28T00:43:51,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/.tmp/cf/2feb4d12a2444b1cba76b28c9eaaeff1 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/cf/2feb4d12a2444b1cba76b28c9eaaeff1 2024-11-28T00:43:51,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/.tmp/cf/263263ef1345403cb37227246cccccb3 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/cf/263263ef1345403cb37227246cccccb3 2024-11-28T00:43:51,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/cf/2feb4d12a2444b1cba76b28c9eaaeff1, entries=4, sequenceid=5, filesize=5.9 K 2024-11-28T00:43:51,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/cf/263263ef1345403cb37227246cccccb3, entries=46, sequenceid=5, filesize=14.1 K 2024-11-28T00:43:51,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for a074d787aab5a43a48b3164bc3a7256f in 52ms, sequenceid=5, compaction requested=false 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-28T00:43:51,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for af0adc78f2023f6d163d04caaa3516c7 in 52ms, sequenceid=5, compaction requested=false 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for a074d787aab5a43a48b3164bc3a7256f: 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. for snapshot-testExportExpiredSnapshot completed. 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for af0adc78f2023f6d163d04caaa3516c7: 2024-11-28T00:43:51,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. for snapshot-testExportExpiredSnapshot completed. 
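Procedures 186 and 187 above are the per-region SnapshotRegionCallable tasks: because the snapshot type is FLUSH, each region first flushes its memstore (here into a MOB file plus a regular store file) and only then records the file references. The flush is the same operation a client can trigger explicitly; a minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask every region of the table to flush its memstore to store files,
          // the per-region step SnapshotRegionCallable performs in the log above.
          admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
        }
      }
    }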
2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/cf/2feb4d12a2444b1cba76b28c9eaaeff1] hfiles 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/cf/263263ef1345403cb37227246cccccb3] hfiles 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/cf/2feb4d12a2444b1cba76b28c9eaaeff1 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/cf/263263ef1345403cb37227246cccccb3 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:43:51,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:43:51,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:43:51,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:43:51,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:43:51,379 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:43:51,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:43:51,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-11-28T00:43:51,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-11-28T00:43:51,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:51,381 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:51,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:43:51,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-28T00:43:51,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-11-28T00:43:51,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:51,382 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:51,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af0adc78f2023f6d163d04caaa3516c7 in 217 msec 2024-11-28T00:43:51,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-11-28T00:43:51,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a074d787aab5a43a48b3164bc3a7256f in 219 msec 2024-11-28T00:43:51,384 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:43:51,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 
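With both region sub-procedures finished, the SnapshotProcedure continues through its remaining states (mob region, consolidate, verify, complete) while the client keeps asking the master whether pid=185 is done ("Checking to see if procedure is done"). From application code the equivalent status checks go through the Admin API; a hedged sketch:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class SnapshotStatusExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Completed snapshots show up here once the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT.
          List<SnapshotDescription> done = admin.listSnapshots();
          for (SnapshotDescription sd : done) {
            System.out.println(sd.getName());
          }
          // The blocking Admin.snapshot() call polls the master much like the client in this log;
          // isSnapshotFinished() exposes that check directly.
          boolean finished = admin.isSnapshotFinished(
              new SnapshotDescription("snapshot-testExportExpiredSnapshot"));
          System.out.println("finished=" + finished);
        }
      }
    }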
2024-11-28T00:43:51,386 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-28T00:43:51,386 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:43:51,386 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:43:51,387 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112840d36140036043f9851f9cb5be2da7b8_af0adc78f2023f6d163d04caaa3516c7, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128b4ff29eaeee14c5ba029624f144273cd_a074d787aab5a43a48b3164bc3a7256f] hfiles 2024-11-28T00:43:51,387 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112840d36140036043f9851f9cb5be2da7b8_af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:43:51,387 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128b4ff29eaeee14c5ba029624f144273cd_a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:43:51,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:43:51,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:43:51,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:43:51,393 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:43:51,393 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,394 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742270_1446 (size=935) 2024-11-28T00:43:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 
is added to blk_1073742270_1446 (size=935) 2024-11-28T00:43:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742270_1446 (size=935) 2024-11-28T00:43:51,406 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:43:51,411 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:43:51,411 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-28T00:43:51,412 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:43:51,412 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-28T00:43:51,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 260 msec 2024-11-28T00:43:51,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-28T00:43:51,468 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-28T00:43:52,081 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0007_000001 (auth:SIMPLE) from 127.0.0.1:47926 2024-11-28T00:43:52,091 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000001/launch_container.sh] 2024-11-28T00:43:52,091 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000001/container_tokens] 2024-11-28T00:43:52,091 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0007/container_1732754375741_0007_01_000001/sysfs] 2024-11-28T00:43:53,397 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:43:56,880 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:43:58,441 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-28T00:43:58,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-28T00:43:58,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-28T00:43:58,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-28T00:44:01,478 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754641478 2024-11-28T00:44:01,478 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754641478, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754641478, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:01,518 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:01,518 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754641478, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754641478/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-28T00:44:01,520 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:44:01,522 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:44:01,523 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:44:01,527 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754641526"}]},"ts":"1732754641526"} 2024-11-28T00:44:01,529 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-28T00:44:01,529 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-28T00:44:01,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-28T00:44:01,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, UNASSIGN}] 2024-11-28T00:44:01,532 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, UNASSIGN 2024-11-28T00:44:01,532 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, UNASSIGN 2024-11-28T00:44:01,533 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=639a0f77d3533d45d64c08f1929affce, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:01,533 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=da45016b67794bb77005d2e661c58149, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:44:01,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, UNASSIGN because future has 
completed 2024-11-28T00:44:01,535 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:01,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:44:01,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, UNASSIGN because future has completed 2024-11-28T00:44:01,536 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:01,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:44:01,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:44:01,688 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close da45016b67794bb77005d2e661c58149 2024-11-28T00:44:01,688 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing da45016b67794bb77005d2e661c58149, disabling compactions & flushes 2024-11-28T00:44:01,689 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. after waiting 0 ms 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
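The ERROR a few entries above is the expected outcome of this test: the snapshot was created at 00:43:51 with a 10-second TTL, so by the export attempt at 00:44:01 the verification step in ExportSnapshot rejects it with SnapshotTTLExpiredException. The stack trace shows the tool being driven through ToolRunner; a sketch of the equivalent invocation for a still-valid snapshot (the destination URI is illustrative, and the flag names follow the documented ExportSnapshot command line):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ExportSnapshot verifies the source snapshot (including its TTL) before copying anything;
        // an expired snapshot fails at that point with SnapshotTTLExpiredException, as in the log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportExpiredSnapshot",
            "-copy-to", "hdfs://namenode:8020/backup/hbase" // illustrative destination
        });
        System.exit(rc);
      }
    }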
2024-11-28T00:44:01,689 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 639a0f77d3533d45d64c08f1929affce, disabling compactions & flushes 2024-11-28T00:44:01,689 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:44:01,689 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:44:01,690 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. after waiting 0 ms 2024-11-28T00:44:01,690 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:44:01,692 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:44:01,692 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:44:01,693 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:01,693 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:01,693 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce. 2024-11-28T00:44:01,693 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149. 
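After the failed export the test tears down testtb-testExportExpiredSnapshot; pids 188 through 193 above are the DisableTableProcedure, the CloseTableRegionsProcedure and the per-region close sub-procedures it spawns. The client-side trigger is a single Admin call; a minimal cleanup sketch (the deleteSnapshot call is illustrative of how an expired snapshot would normally be removed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndCleanupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // disableTable submits the DisableTableProcedure seen above (pid=188) and waits
          // until every region has been closed (pids 189 through 193).
          admin.disableTable(table);
          admin.deleteTable(table);
          // An expired snapshot would normally be removed explicitly like this.
          admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
        }
      }
    }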
2024-11-28T00:44:01,693 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for da45016b67794bb77005d2e661c58149: Waiting for close lock at 1732754641688Running coprocessor pre-close hooks at 1732754641688Disabling compacts and flushes for region at 1732754641689 (+1 ms)Disabling writes for close at 1732754641689Writing region close event to WAL at 1732754641689Running coprocessor post-close hooks at 1732754641693 (+4 ms)Closed at 1732754641693 2024-11-28T00:44:01,693 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 639a0f77d3533d45d64c08f1929affce: Waiting for close lock at 1732754641689Running coprocessor pre-close hooks at 1732754641689Disabling compacts and flushes for region at 1732754641689Disabling writes for close at 1732754641690 (+1 ms)Writing region close event to WAL at 1732754641690Running coprocessor post-close hooks at 1732754641693 (+3 ms)Closed at 1732754641693 2024-11-28T00:44:01,696 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 639a0f77d3533d45d64c08f1929affce 2024-11-28T00:44:01,696 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=639a0f77d3533d45d64c08f1929affce, regionState=CLOSED 2024-11-28T00:44:01,696 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed da45016b67794bb77005d2e661c58149 2024-11-28T00:44:01,697 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=da45016b67794bb77005d2e661c58149, regionState=CLOSED 2024-11-28T00:44:01,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:44:01,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:44:01,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-11-28T00:44:01,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 639a0f77d3533d45d64c08f1929affce, server=97f25515df69,34347,1732754369169 in 163 msec 2024-11-28T00:44:01,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-11-28T00:44:01,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure da45016b67794bb77005d2e661c58149, server=97f25515df69,43795,1732754369084 in 165 msec 2024-11-28T00:44:01,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=639a0f77d3533d45d64c08f1929affce, UNASSIGN in 170 msec 2024-11-28T00:44:01,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=190, resume processing ppid=189 2024-11-28T00:44:01,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=da45016b67794bb77005d2e661c58149, UNASSIGN in 171 msec 2024-11-28T00:44:01,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-11-28T00:44:01,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 175 msec 2024-11-28T00:44:01,714 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754641713"}]},"ts":"1732754641713"} 2024-11-28T00:44:01,716 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-28T00:44:01,716 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-28T00:44:01,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 194 msec 2024-11-28T00:44:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:44:01,838 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:44:01,839 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,840 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,842 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,847 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149 2024-11-28T00:44:01,847 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce 2024-11-28T00:44:01,849 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/recovered.edits] 2024-11-28T00:44:01,849 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/recovered.edits] 2024-11-28T00:44:01,854 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/cf/ed1bb3b961fa48f2b03459dd2de0e2db 2024-11-28T00:44:01,854 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/cf/65ba250b2c1e44328dca2b43ad3b2f35 2024-11-28T00:44:01,857 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149/recovered.edits/9.seqid 2024-11-28T00:44:01,858 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce/recovered.edits/9.seqid 2024-11-28T00:44:01,858 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/da45016b67794bb77005d2e661c58149 2024-11-28T00:44:01,858 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportExpiredSnapshot/639a0f77d3533d45d64c08f1929affce 2024-11-28T00:44:01,858 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-28T00:44:01,859 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-28T00:44:01,860 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-28T00:44:01,864 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241128001d9a4ebaf9471eb2d124b906013404_639a0f77d3533d45d64c08f1929affce 2024-11-28T00:44:01,865 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411280f384547260845f7a762b1a955667bf0_da45016b67794bb77005d2e661c58149 2024-11-28T00:44:01,866 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-28T00:44:01,868 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,870 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-28T00:44:01,920 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:44:01,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:44:01,922 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:44:01,922 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:44:01,923 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-28T00:44:01,927 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,927 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-28T00:44:01,928 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754641927"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:01,928 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754641927"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:01,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:01,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:01,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:01,931 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,931 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:01,931 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:44:01,931 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => da45016b67794bb77005d2e661c58149, NAME => 'testtb-testExportExpiredSnapshot,,1732754628596.da45016b67794bb77005d2e661c58149.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 639a0f77d3533d45d64c08f1929affce, NAME => 'testtb-testExportExpiredSnapshot,1,1732754628596.639a0f77d3533d45d64c08f1929affce.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:44:01,931 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-28T00:44:01,931 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754641931"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-28T00:44:01,933 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:01,933 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:01,933 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:01,933 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:01,934 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-28T00:44:01,935 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:44:01,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 96 msec 2024-11-28T00:44:02,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-28T00:44:02,038 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-28T00:44:02,038 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:44:02,047 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:44:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:44:02,050 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:44:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-28T00:44:02,054 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:44:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-28T00:44:02,080 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=809 (was 814), OpenFileDescriptor=797 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=620 (was 684), ProcessCount=18 (was 24), AvailableMemoryMB=3612 (was 2073) - AvailableMemoryMB LEAK? 
- 2024-11-28T00:44:02,080 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-28T00:44:02,103 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=809, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=620, ProcessCount=18, AvailableMemoryMB=3612 2024-11-28T00:44:02,103 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-28T00:44:02,104 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:44:02,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:02,107 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:44:02,107 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-11-28T00:44:02,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-28T00:44:02,111 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:44:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:44:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:44:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:44:02,121 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 93b1d0c2c2cc82da4cd135609a392dc4, NAME => 'testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:02,121 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e6eed9b0a2ad5ceac984cd81715712fb, NAME => 'testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:44:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:44:02,130 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:02,130 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing e6eed9b0a2ad5ceac984cd81715712fb, disabling compactions & flushes 2024-11-28T00:44:02,130 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,130 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,130 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 
after waiting 0 ms 2024-11-28T00:44:02,130 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,130 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,131 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for e6eed9b0a2ad5ceac984cd81715712fb: Waiting for close lock at 1732754642130Disabling compacts and flushes for region at 1732754642130Disabling writes for close at 1732754642130Writing region close event to WAL at 1732754642130Closed at 1732754642130 2024-11-28T00:44:02,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-28T00:44:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 93b1d0c2c2cc82da4cd135609a392dc4, disabling compactions & flushes 2024-11-28T00:44:02,530 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. after waiting 0 ms 2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:02,530 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 
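CreateTableProcedure pid=195 above initializes two regions for testtb-testEmptyExportFileSystemState from a descriptor with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and a split at row key '1'. A rough sketch of building and submitting an equivalent descriptor with the HBase 2.x client API (only the attributes visible in the log are set; everything else is left at its default):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {

      // Column family 'cf' with MOB enabled and a zero threshold, so every cell
      // value is written as a MOB file (matching IS_MOB => 'true', MOB_THRESHOLD => '0').
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build())
          .build();

      // One split key at '1' yields the two regions seen in the log: ('', '1') and ('1', '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}
```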
2024-11-28T00:44:02,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 93b1d0c2c2cc82da4cd135609a392dc4: Waiting for close lock at 1732754642530Disabling compacts and flushes for region at 1732754642530Disabling writes for close at 1732754642530Writing region close event to WAL at 1732754642530Closed at 1732754642530 2024-11-28T00:44:02,531 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:44:02,531 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732754642531"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754642531"}]},"ts":"1732754642531"} 2024-11-28T00:44:02,531 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732754642531"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754642531"}]},"ts":"1732754642531"} 2024-11-28T00:44:02,533 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:44:02,534 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:44:02,534 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754642534"}]},"ts":"1732754642534"} 2024-11-28T00:44:02,535 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-28T00:44:02,536 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:44:02,537 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:44:02,537 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:44:02,537 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:44:02,537 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:44:02,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, ASSIGN}] 2024-11-28T00:44:02,538 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, ASSIGN 2024-11-28T00:44:02,538 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, ASSIGN 2024-11-28T00:44:02,539 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:44:02,539 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:44:02,689 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
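The repeated "Checking to see if procedure is done pid=195" entries are the master answering the client's polling while it waits for the CreateTableProcedure to finish; the RawAsyncHBaseAdmin "Operation: ... completed" lines mark the end of that wait. The blocking Admin calls do this polling internally, but the Admin interface also exposes a Future-returning variant. A small, self-contained sketch under that assumption (the descriptor and split keys are taken as parameters here, e.g. the ones built in the previous snippet):

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateTableAndWait {
  /**
   * Submit the CreateTableProcedure without blocking, then wait for it.
   * While get() is pending, the client polls the master, which is what the
   * repeated "Checking to see if procedure is done pid=..." records reflect.
   */
  static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splitKeys)
      throws Exception {
    Future<Void> procedure = admin.createTableAsync(desc, splitKeys);
    // ... other setup work could run here while the master assigns the regions ...
    procedure.get();   // returns once the procedure reaches SUCCESS
  }
}
```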
2024-11-28T00:44:02,689 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=93b1d0c2c2cc82da4cd135609a392dc4, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:02,689 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=e6eed9b0a2ad5ceac984cd81715712fb, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:02,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, ASSIGN because future has completed 2024-11-28T00:44:02,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:44:02,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, ASSIGN because future has completed 2024-11-28T00:44:02,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:44:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-28T00:44:02,846 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:02,846 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 93b1d0c2c2cc82da4cd135609a392dc4, NAME => 'testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:44:02,847 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. service=AccessControlService 2024-11-28T00:44:02,847 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:44:02,847 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,847 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:02,847 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,847 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,848 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,848 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => e6eed9b0a2ad5ceac984cd81715712fb, NAME => 'testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:44:02,848 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. service=AccessControlService 2024-11-28T00:44:02,849 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
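Each region open above registers the AccessControlService and loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor, and once the table is up the master writes a "jenkins: RWXCA" entry to hbase:acl and fans it out via the ZKPermissionWatcher (the /hbase/acl ZooKeeper events further below). That coprocessor is not loaded by default; a sketch of the standard configuration keys a secured cluster sets to get this behaviour (shown programmatically for brevity, though the same keys would normally live in hbase-site.xml):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfig {
  static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Enable HBase authorization and register AccessController on the master,
    // the regionservers and every region, so that region opens log
    // "System coprocessor ...AccessController loaded" as above.
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
```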
2024-11-28T00:44:02,849 INFO [StoreOpener-93b1d0c2c2cc82da4cd135609a392dc4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,849 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,849 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:02,849 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,849 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,850 INFO [StoreOpener-93b1d0c2c2cc82da4cd135609a392dc4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 93b1d0c2c2cc82da4cd135609a392dc4 columnFamilyName cf 2024-11-28T00:44:02,850 INFO [StoreOpener-e6eed9b0a2ad5ceac984cd81715712fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,851 DEBUG [StoreOpener-93b1d0c2c2cc82da4cd135609a392dc4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:02,851 INFO [StoreOpener-e6eed9b0a2ad5ceac984cd81715712fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
e6eed9b0a2ad5ceac984cd81715712fb columnFamilyName cf 2024-11-28T00:44:02,852 INFO [StoreOpener-93b1d0c2c2cc82da4cd135609a392dc4-1 {}] regionserver.HStore(327): Store=93b1d0c2c2cc82da4cd135609a392dc4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:44:02,852 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,852 DEBUG [StoreOpener-e6eed9b0a2ad5ceac984cd81715712fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:02,853 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,853 INFO [StoreOpener-e6eed9b0a2ad5ceac984cd81715712fb-1 {}] regionserver.HStore(327): Store=e6eed9b0a2ad5ceac984cd81715712fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:44:02,853 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,853 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,853 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,853 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,854 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,854 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,855 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,855 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,855 DEBUG 
[RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,856 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,857 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:44:02,858 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:44:02,858 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 93b1d0c2c2cc82da4cd135609a392dc4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67841038, jitterRate=0.010910242795944214}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:44:02,858 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:02,858 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened e6eed9b0a2ad5ceac984cd81715712fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72583149, jitterRate=0.08157320320606232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:44:02,858 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:02,858 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 93b1d0c2c2cc82da4cd135609a392dc4: Running coprocessor pre-open hook at 1732754642847Writing region info on filesystem at 1732754642848 (+1 ms)Initializing all the Stores at 1732754642848Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754642848Cleaning up temporary data from old regions at 1732754642853 (+5 ms)Running coprocessor post-open hooks at 1732754642858 (+5 ms)Region opened successfully at 1732754642858 2024-11-28T00:44:02,858 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for e6eed9b0a2ad5ceac984cd81715712fb: Running coprocessor 
pre-open hook at 1732754642849Writing region info on filesystem at 1732754642849Initializing all the Stores at 1732754642850 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754642850Cleaning up temporary data from old regions at 1732754642855 (+5 ms)Running coprocessor post-open hooks at 1732754642858 (+3 ms)Region opened successfully at 1732754642858 2024-11-28T00:44:02,859 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4., pid=198, masterSystemTime=1732754642843 2024-11-28T00:44:02,859 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb., pid=199, masterSystemTime=1732754642844 2024-11-28T00:44:02,865 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,865 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:02,865 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=e6eed9b0a2ad5ceac984cd81715712fb, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:02,866 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:02,866 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 
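At this point both regions of testtb-testEmptyExportFileSystemState have finished their post-open deploy tasks and hbase:meta is being updated to OPEN with openSeqNum=2 on servers 38585 and 34347. A small sketch of how a client could confirm that assignment, assuming the same Connection as in the earlier snippets:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckAssignment {
  static void printLocations(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // Lists each region of the table together with the server it is open on,
      // e.g. the two encoded region names assigned to ...,38585,... and ...,34347,... above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```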
2024-11-28T00:44:02,867 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=93b1d0c2c2cc82da4cd135609a392dc4, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:02,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:44:02,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:44:02,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-11-28T00:44:02,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, server=97f25515df69,34347,1732754369169 in 176 msec 2024-11-28T00:44:02,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-11-28T00:44:02,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, ASSIGN in 332 msec 2024-11-28T00:44:02,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938 in 177 msec 2024-11-28T00:44:02,872 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-11-28T00:44:02,872 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, ASSIGN in 334 msec 2024-11-28T00:44:02,873 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:44:02,873 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754642873"}]},"ts":"1732754642873"} 2024-11-28T00:44:02,874 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-28T00:44:02,875 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:44:02,875 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-28T00:44:02,878 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:44:02,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:02,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:02,941 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:02,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:02,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 848 msec 2024-11-28T00:44:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-28T00:44:03,248 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:44:03,248 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-28T00:44:03,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:03,252 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:44:03,254 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,260 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,269 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754643273 (current time:1732754643273). 
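The "snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }" entry above is what the master logs when a client asks for a flush-type snapshot. A minimal sketch of that client call, assuming the standard Admin API and an already-configured connection (the class name and the connection setup are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Produces a master-side request of the same shape as
          // "{ ss=emptySnaptb0-... table=... type=FLUSH ttl=0 }".
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testEmptyExportFileSystemState", tn, SnapshotType.FLUSH));
        }
      }
    }

The synchronous Admin.snapshot call returns only once the master-side snapshot procedure completes, which matches the repeated "Checking to see if procedure is done pid=200" polling that follows in the log.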
2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31803d9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:03,274 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f517cf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:03,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,277 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41528, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:03,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f95a512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:03,279 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:03,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:03,281 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48858, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:03,282 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:44:03,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:03,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,282 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:44:03,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f0c6d7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:03,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:03,287 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:03,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:03,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:03,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37258384, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:03,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:03,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,293 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:03,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b0692b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:03,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:03,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:03,302 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48872, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:44:03,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:44:03,310 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:44:03,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:03,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv 
[jenkins: RWXCA] 2024-11-28T00:44:03,312 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:44:03,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:44:03,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:44:03,322 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:44:03,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-28T00:44:03,323 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:44:03,328 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:44:03,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-28T00:44:03,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:44:03,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:44:03,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:44:03,366 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:44:03,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb}] 2024-11-28T00:44:03,367 INFO [PEWorker-1 
{}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:03,368 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:03,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-28T00:44:03,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-28T00:44:03,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-11-28T00:44:03,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for e6eed9b0a2ad5ceac984cd81715712fb: 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 93b1d0c2c2cc82da4cd135609a392dc4: 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:44:03,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:44:03,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:44:03,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:44:03,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:44:03,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:03,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-11-28T00:44:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-11-28T00:44:03,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:03,534 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:03,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4 in 169 msec 2024-11-28T00:44:03,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:44:03,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:44:03,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:44:03,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 
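Both SnapshotRegionProcedure subprocedures above store region-info and "[] hfiles" because nothing has been flushed yet, so the resulting snapshot references no store files. A small sketch, assuming the same Admin API, for listing the snapshots that exist once the procedure finishes; the helper class and method are hypothetical, not part of the test:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    final class ListSnapshotsSketch {
      // Prints each registered snapshot and the table it was taken from.
      static void printSnapshots(Admin admin) throws Exception {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription s : snapshots) {
          System.out.println(s.getName() + " -> " + s.getTableName());
        }
      }
    }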
2024-11-28T00:44:03,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-28T00:44:03,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-28T00:44:03,551 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:03,552 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:03,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-11-28T00:44:03,561 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:44:03,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb in 189 msec 2024-11-28T00:44:03,563 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:44:03,566 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:44:03,566 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:44:03,566 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:03,567 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:44:03,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:44:03,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:44:03,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:44:03,593 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:44:03,593 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:03,594 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:03,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:44:03,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:44:03,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:44:03,635 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:44:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-28T00:44:03,659 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:44:03,659 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:03,661 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:44:03,661 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-28T00:44:03,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 345 msec 2024-11-28T00:44:03,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-28T00:44:03,968 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:44:03,976 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:44:03,977 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:44:03,978 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,981 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-28T00:44:03,981 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 
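The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what a regionserver emits for mutations sent with SKIP_WAL durability. A minimal client-side sketch of such a write, assuming the standard Table/Put API; the row key and value are placeholders, and only the 'cf:q' column matches the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class SkipWalPutSketch {
      // Writes one cell to 'cf:q' while skipping the WAL; the regionserver then logs the
      // "with WAL disabled. Data may be lost in the event of a crash." warning seen above.
      static void putWithoutWal(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row-0"))          // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"))
              .setDurability(Durability.SKIP_WAL);           // skip the write-ahead log
          table.put(put);
        }
      }
    }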
2024-11-28T00:44:03,981 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:44:03,983 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,988 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,993 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:44:03,995 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:44:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754643995 (current time:1732754643995). 2024-11-28T00:44:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:44:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:44:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:44:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fb18afd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:03,997 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:03,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:03,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:03,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@758df099, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:03,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:03,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:03,998 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41562, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:03,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a05f5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:03,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:04,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:04,000 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48882, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:04,001 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:44:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:04,001 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:44:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@771f03cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:04,003 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:04,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:04,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:04,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc7562f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:04,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:04,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:04,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:04,004 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41574, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:04,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c86ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:04,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:04,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:04,006 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:04,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:44:04,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:44:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:04,009 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:44:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:44:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
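The repeated "Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]" entries show the owner grant written for the table during CreateTableProcedure and propagated through /hbase/acl. A hedged sketch of how such grants could be inspected from a client, assuming the AccessControlClient helper from the HBase security module; treat the exact method signature as an assumption:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    final class ReadTableAclSketch {
      // Prints entries equivalent to the "jenkins: RWXCA" line read from hbase:acl above.
      static void printAcl(Connection conn) throws Throwable {
        List<UserPermission> perms =
            AccessControlClient.getUserPermissions(conn, "testtb-testEmptyExportFileSystemState");
        for (UserPermission p : perms) {
          System.out.println(p);
        }
      }
    }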
2024-11-28T00:44:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:44:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-28T00:44:04,012 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:44:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:44:04,013 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:44:04,015 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:44:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:44:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:44:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:44:04,024 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:44:04,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb}] 2024-11-28T00:44:04,025 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:04,025 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:44:04,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-28T00:44:04,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-11-28T00:44:04,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:04,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:04,177 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 93b1d0c2c2cc82da4cd135609a392dc4 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:44:04,177 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing e6eed9b0a2ad5ceac984cd81715712fb 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:44:04,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 is 71, key is 07d00e9b8a98a8102f3ebc091c777030/cf:q/1732754643975/Put/seqid=0 2024-11-28T00:44:04,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb is 71, key is 2f98389fd2b7894053851d6dbc7ac2da/cf:q/1732754643977/Put/seqid=0 2024-11-28T00:44:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742281_1457 (size=8172) 2024-11-28T00:44:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742280_1456 (size=5102) 2024-11-28T00:44:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742281_1457 (size=8172) 2024-11-28T00:44:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742280_1456 (size=5102) 2024-11-28T00:44:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742281_1457 (size=8172) 
2024-11-28T00:44:04,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742280_1456 (size=5102) 2024-11-28T00:44:04,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:04,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:04,205 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:04,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/.tmp/cf/11f0a603cb9f4816816f092863c42f9f, store: [table=testtb-testEmptyExportFileSystemState family=cf region=93b1d0c2c2cc82da4cd135609a392dc4] 2024-11-28T00:44:04,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/.tmp/cf/11f0a603cb9f4816816f092863c42f9f is 214, key is 0b0890517c860ff5c7d1262b1a19110c3/cf:q/1732754643975/Put/seqid=0 2024-11-28T00:44:04,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:04,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/.tmp/cf/83bd80b9e193415cb2842b090ee5b68b, store: [table=testtb-testEmptyExportFileSystemState family=cf region=e6eed9b0a2ad5ceac984cd81715712fb] 
2024-11-28T00:44:04,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/.tmp/cf/83bd80b9e193415cb2842b090ee5b68b is 214, key is 10b5af44c2187a21488af0ad096be0027/cf:q/1732754643977/Put/seqid=0 2024-11-28T00:44:04,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742282_1458 (size=5938) 2024-11-28T00:44:04,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742282_1458 (size=5938) 2024-11-28T00:44:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742282_1458 (size=5938) 2024-11-28T00:44:04,213 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/.tmp/cf/11f0a603cb9f4816816f092863c42f9f 2024-11-28T00:44:04,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742283_1459 (size=15239) 2024-11-28T00:44:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742283_1459 (size=15239) 2024-11-28T00:44:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742283_1459 (size=15239) 2024-11-28T00:44:04,219 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/.tmp/cf/83bd80b9e193415cb2842b090ee5b68b 2024-11-28T00:44:04,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/.tmp/cf/11f0a603cb9f4816816f092863c42f9f as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f 2024-11-28T00:44:04,224 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:44:04,225 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/.tmp/cf/83bd80b9e193415cb2842b090ee5b68b as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b 2024-11-28T00:44:04,227 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 93b1d0c2c2cc82da4cd135609a392dc4 in 50ms, sequenceid=6, compaction requested=false 2024-11-28T00:44:04,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 93b1d0c2c2cc82da4cd135609a392dc4: 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f] hfiles 2024-11-28T00:44:04,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,235 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b, entries=47, sequenceid=6, filesize=14.9 K 2024-11-28T00:44:04,236 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e6eed9b0a2ad5ceac984cd81715712fb in 59ms, sequenceid=6, compaction requested=false 2024-11-28T00:44:04,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for e6eed9b0a2ad5ceac984cd81715712fb: 2024-11-28T00:44:04,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:44:04,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:04,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b] hfiles 2024-11-28T00:44:04,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:44:04,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:44:04,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:44:04,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 
2024-11-28T00:44:04,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-11-28T00:44:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-11-28T00:44:04,271 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:04,271 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:04,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4 in 248 msec 2024-11-28T00:44:04,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:44:04,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:44:04,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:44:04,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 
2024-11-28T00:44:04,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-28T00:44:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-28T00:44:04,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:04,286 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:04,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-11-28T00:44:04,289 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:44:04,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb in 263 msec 2024-11-28T00:44:04,290 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:44:04,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:44:04,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:44:04,291 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:04,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4] hfiles 2024-11-28T00:44:04,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:04,292 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:44:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:44:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:44:04,304 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:44:04,304 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,305 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742287_1463 (size=983) 2024-11-28T00:44:04,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742287_1463 (size=983) 
2024-11-28T00:44:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742287_1463 (size=983) 2024-11-28T00:44:04,326 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:44:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:44:04,333 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:44:04,334 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,336 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:44:04,336 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-28T00:44:04,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 326 msec 2024-11-28T00:44:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:44:04,638 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:44:04,639 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638 2024-11-28T00:44:04,639 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 
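
The preceding entries show the test handing the snapshot to ExportSnapshot with the HDFS export destination resolved; the DEBUG lines that follow are the tool logging its inputFs/outputFs and copying the snapshot manifest. Outside the test harness the same export is normally driven through the ExportSnapshot tool; a rough sketch follows (the -snapshot and -copy-to flags follow the HBase reference guide, while the ToolRunner wiring and the abbreviated target path are assumptions for illustration only):

    // Hedged sketch: driving an export like the one below from code.
    // Equivalent CLI (per the HBase reference guide):
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot emptySnaptb0-testEmptyExportFileSystemState \
    //       -copy-to <destination HDFS directory>          // placeholder, not the test's real path
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumes ExportSnapshot is usable as a Hadoop Tool, as the hbase launcher script uses it.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:44423/path/to/export-target"   // placeholder target
        });
        System.exit(rc);
      }
    }
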
2024-11-28T00:44:04,668 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:04,668 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,669 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:44:04,673 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:44:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:44:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:44:04,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:44:04,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:44:04,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:44:04,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:04,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:04,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-3670950673618147391.jar 
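
The runs of "For class ..., using jar ..." DEBUG entries around this point come from TableMapReduceUtil resolving, for each class the export job depends on, the jar that provides it so the jar can be shipped with the MapReduce job. A minimal sketch of the public helper behind that behaviour, assuming an illustrative job name and default configuration rather than anything taken from the test:

    // Hedged sketch: TableMapReduceUtil.addDependencyJars attaches the jars backing the
    // job's classes to the job's classpath, typically producing "For class X, using jar Y" logging.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jar-sketch");   // illustrative job name
        TableMapReduceUtil.addDependencyJars(job);                  // resolves and attaches the needed jars
        // job.submit() would then run with those jars available to the tasks.
      }
    }
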
2024-11-28T00:44:05,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-437267487590066266.jar 2024-11-28T00:44:05,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:05,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:44:05,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:44:05,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:44:05,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:44:05,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:44:05,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:44:05,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:44:05,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:44:05,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:44:05,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:44:05,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:44:05,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:05,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:05,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:05,761 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:05,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:05,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:05,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:05,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742290_1466 (size=24020) 2024-11-28T00:44:05,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742290_1466 (size=24020) 2024-11-28T00:44:05,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742290_1466 (size=24020) 2024-11-28T00:44:05,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742291_1467 (size=77755) 2024-11-28T00:44:05,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742291_1467 (size=77755) 2024-11-28T00:44:05,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742291_1467 (size=77755) 2024-11-28T00:44:05,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742292_1468 (size=131360) 2024-11-28T00:44:05,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742292_1468 (size=131360) 2024-11-28T00:44:05,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742292_1468 (size=131360) 2024-11-28T00:44:05,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742293_1469 (size=111793) 2024-11-28T00:44:05,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742293_1469 (size=111793) 2024-11-28T00:44:05,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742293_1469 (size=111793) 2024-11-28T00:44:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742294_1470 (size=1832290) 2024-11-28T00:44:05,875 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742294_1470 (size=1832290) 2024-11-28T00:44:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742294_1470 (size=1832290) 2024-11-28T00:44:05,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742295_1471 (size=6424743) 2024-11-28T00:44:05,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742295_1471 (size=6424743) 2024-11-28T00:44:05,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742295_1471 (size=6424743) 2024-11-28T00:44:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742296_1472 (size=8360005) 2024-11-28T00:44:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742296_1472 (size=8360005) 2024-11-28T00:44:05,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742296_1472 (size=8360005) 2024-11-28T00:44:05,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742297_1473 (size=503880) 2024-11-28T00:44:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742297_1473 (size=503880) 2024-11-28T00:44:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742297_1473 (size=503880) 2024-11-28T00:44:05,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742298_1474 (size=322274) 2024-11-28T00:44:05,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742298_1474 (size=322274) 2024-11-28T00:44:05,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742298_1474 (size=322274) 2024-11-28T00:44:05,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742299_1475 (size=20406) 2024-11-28T00:44:05,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742299_1475 (size=20406) 2024-11-28T00:44:05,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742299_1475 (size=20406) 2024-11-28T00:44:06,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742300_1476 (size=45609) 2024-11-28T00:44:06,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742300_1476 (size=45609) 2024-11-28T00:44:06,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742300_1476 (size=45609) 2024-11-28T00:44:06,028 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742301_1477 (size=136454) 2024-11-28T00:44:06,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742301_1477 (size=136454) 2024-11-28T00:44:06,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742301_1477 (size=136454) 2024-11-28T00:44:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742302_1478 (size=1597136) 2024-11-28T00:44:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742302_1478 (size=1597136) 2024-11-28T00:44:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742302_1478 (size=1597136) 2024-11-28T00:44:06,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742303_1479 (size=30873) 2024-11-28T00:44:06,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742303_1479 (size=30873) 2024-11-28T00:44:06,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742303_1479 (size=30873) 2024-11-28T00:44:06,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742304_1480 (size=29229) 2024-11-28T00:44:06,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742304_1480 (size=29229) 2024-11-28T00:44:06,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742304_1480 (size=29229) 2024-11-28T00:44:06,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742305_1481 (size=903861) 2024-11-28T00:44:06,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742305_1481 (size=903861) 2024-11-28T00:44:06,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742305_1481 (size=903861) 2024-11-28T00:44:06,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742306_1482 (size=5175431) 2024-11-28T00:44:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742306_1482 (size=5175431) 2024-11-28T00:44:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742306_1482 (size=5175431) 2024-11-28T00:44:06,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742307_1483 (size=232881) 2024-11-28T00:44:06,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742307_1483 (size=232881) 2024-11-28T00:44:06,125 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742307_1483 (size=232881) 2024-11-28T00:44:06,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742308_1484 (size=1323991) 2024-11-28T00:44:06,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742308_1484 (size=1323991) 2024-11-28T00:44:06,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742308_1484 (size=1323991) 2024-11-28T00:44:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742309_1485 (size=4695811) 2024-11-28T00:44:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742309_1485 (size=4695811) 2024-11-28T00:44:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742309_1485 (size=4695811) 2024-11-28T00:44:06,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742310_1486 (size=1877034) 2024-11-28T00:44:06,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742310_1486 (size=1877034) 2024-11-28T00:44:06,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742310_1486 (size=1877034) 2024-11-28T00:44:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742311_1487 (size=440956) 2024-11-28T00:44:06,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742311_1487 (size=440956) 2024-11-28T00:44:06,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742311_1487 (size=440956) 2024-11-28T00:44:06,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742312_1488 (size=217555) 2024-11-28T00:44:06,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742312_1488 (size=217555) 2024-11-28T00:44:06,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742312_1488 (size=217555) 2024-11-28T00:44:06,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742313_1489 (size=4188619) 2024-11-28T00:44:06,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742313_1489 (size=4188619) 2024-11-28T00:44:06,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742313_1489 (size=4188619) 2024-11-28T00:44:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742314_1490 (size=127628) 
2024-11-28T00:44:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742314_1490 (size=127628) 2024-11-28T00:44:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742314_1490 (size=127628) 2024-11-28T00:44:06,222 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:44:06,224 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-28T00:44:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:44:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:44:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:44:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:44:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:44:06,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:44:06,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742317_1493 (size=303978) 2024-11-28T00:44:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742317_1493 (size=303978) 2024-11-28T00:44:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742317_1493 (size=303978) 2024-11-28T00:44:06,302 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:44:06,302 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:44:06,846 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0008_000001 (auth:SIMPLE) from 127.0.0.1:51584 2024-11-28T00:44:07,237 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:44:08,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-28T00:44:08,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-28T00:44:08,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-28T00:44:13,945 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:44:14,448 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0008_000001 (auth:SIMPLE) from 127.0.0.1:53852 2024-11-28T00:44:14,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742318_1494 (size=349652) 2024-11-28T00:44:14,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742318_1494 (size=349652) 2024-11-28T00:44:14,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742318_1494 (size=349652) 2024-11-28T00:44:15,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:44:15,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:44:15,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:44:15,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:44:15,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:44:15,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:44:15,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:44:15,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:44:15,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38327 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:44:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742322_1498 (size=349652) 2024-11-28T00:44:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742322_1498 (size=349652) 2024-11-28T00:44:16,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742322_1498 (size=349652) 2024-11-28T00:44:17,520 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:44:17,522 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:44:17,527 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:17,527 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:44:17,528 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:44:17,528 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:17,528 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-28T00:44:17,528 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-28T00:44:17,528 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:17,529 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-28T00:44:17,529 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754644638/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-28T00:44:17,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,535 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:44:17,538 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754657538"}]},"ts":"1732754657538"} 2024-11-28T00:44:17,540 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-28T00:44:17,540 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-28T00:44:17,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-28T00:44:17,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, UNASSIGN}] 2024-11-28T00:44:17,543 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, UNASSIGN 2024-11-28T00:44:17,543 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, UNASSIGN 2024-11-28T00:44:17,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=e6eed9b0a2ad5ceac984cd81715712fb, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:17,545 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=93b1d0c2c2cc82da4cd135609a392dc4, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:17,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, UNASSIGN because future has completed 2024-11-28T00:44:17,546 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:17,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, 
server=97f25515df69,34347,1732754369169}] 2024-11-28T00:44:17,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, UNASSIGN because future has completed 2024-11-28T00:44:17,547 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:17,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:44:17,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:44:17,699 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:17,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:17,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing e6eed9b0a2ad5ceac984cd81715712fb, disabling compactions & flushes 2024-11-28T00:44:17,699 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:17,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:17,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. after waiting 0 ms 2024-11-28T00:44:17,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 2024-11-28T00:44:17,699 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:17,700 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:17,700 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 93b1d0c2c2cc82da4cd135609a392dc4, disabling compactions & flushes 2024-11-28T00:44:17,700 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 
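The ExportSnapshot entries a few lines above ("Finalize the Snapshot Export", "Verify the exported snapshot's expiration status and integrity.", "Export Completed: emptySnaptb0-testEmptyExportFileSystemState") are produced by the ExportSnapshot MapReduce tool that the test drives. Below is a minimal, illustrative sketch of invoking the same tool programmatically, assuming it exposes the Hadoop Tool interface and its usual -snapshot / -copy-to / -overwrite options; the snapshot name is taken from the log, while the destination URI is a placeholder and not a value from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Snapshot name is from the log above; the destination URI is an illustrative placeholder.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://namenode:8020/backup/hbase-snapshots",
            "-overwrite"
        });
        System.exit(rc);
      }
    }

Outside of tests the same tool is normally run from the shell as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot with the same options.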
2024-11-28T00:44:17,700 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:17,700 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. after waiting 0 ms 2024-11-28T00:44:17,700 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:17,703 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:44:17,703 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:44:17,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:17,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:17,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4. 2024-11-28T00:44:17,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb. 
2024-11-28T00:44:17,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for e6eed9b0a2ad5ceac984cd81715712fb: Waiting for close lock at 1732754657699Running coprocessor pre-close hooks at 1732754657699Disabling compacts and flushes for region at 1732754657699Disabling writes for close at 1732754657699Writing region close event to WAL at 1732754657700 (+1 ms)Running coprocessor post-close hooks at 1732754657704 (+4 ms)Closed at 1732754657704 2024-11-28T00:44:17,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 93b1d0c2c2cc82da4cd135609a392dc4: Waiting for close lock at 1732754657700Running coprocessor pre-close hooks at 1732754657700Disabling compacts and flushes for region at 1732754657700Disabling writes for close at 1732754657700Writing region close event to WAL at 1732754657700Running coprocessor post-close hooks at 1732754657704 (+4 ms)Closed at 1732754657704 2024-11-28T00:44:17,706 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:17,706 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=93b1d0c2c2cc82da4cd135609a392dc4, regionState=CLOSED 2024-11-28T00:44:17,706 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:17,707 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=e6eed9b0a2ad5ceac984cd81715712fb, regionState=CLOSED 2024-11-28T00:44:17,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:44:17,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:44:17,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-11-28T00:44:17,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 93b1d0c2c2cc82da4cd135609a392dc4, server=97f25515df69,38585,1732754368938 in 162 msec 2024-11-28T00:44:17,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=93b1d0c2c2cc82da4cd135609a392dc4, UNASSIGN in 170 msec 2024-11-28T00:44:17,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-11-28T00:44:17,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure e6eed9b0a2ad5ceac984cd81715712fb, server=97f25515df69,34347,1732754369169 in 165 msec 2024-11-28T00:44:17,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=209, resume processing ppid=207 2024-11-28T00:44:17,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e6eed9b0a2ad5ceac984cd81715712fb, UNASSIGN in 171 msec 2024-11-28T00:44:17,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-11-28T00:44:17,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 175 msec 2024-11-28T00:44:17,720 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754657720"}]},"ts":"1732754657720"} 2024-11-28T00:44:17,722 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-28T00:44:17,722 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-28T00:44:17,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 189 msec 2024-11-28T00:44:17,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:44:17,858 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:44:17,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,861 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,867 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:17,867 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:17,868 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/recovered.edits] 2024-11-28T00:44:17,868 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/recovered.edits] 2024-11-28T00:44:17,872 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/cf/11f0a603cb9f4816816f092863c42f9f 2024-11-28T00:44:17,872 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/cf/83bd80b9e193415cb2842b090ee5b68b 2024-11-28T00:44:17,874 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb/recovered.edits/9.seqid 2024-11-28T00:44:17,874 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4/recovered.edits/9.seqid 2024-11-28T00:44:17,875 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:17,875 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testEmptyExportFileSystemState/93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:17,875 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-28T00:44:17,875 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-28T00:44:17,876 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-28T00:44:17,882 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411287dbe06b4871e4a439445f76c8a450273_e6eed9b0a2ad5ceac984cd81715712fb 2024-11-28T00:44:17,883 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024112855fe6bdb61de4133a422cb9e90cca2b3_93b1d0c2c2cc82da4cd135609a392dc4 2024-11-28T00:44:17,884 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-28T00:44:17,886 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:17,889 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-28T00:44:18,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,192 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:44:18,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:44:18,194 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-28T00:44:18,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:44:18,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:44:18,195 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,195 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-28T00:44:18,195 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754658195"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:18,196 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754658195"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:18,198 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:44:18,198 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 93b1d0c2c2cc82da4cd135609a392dc4, NAME => 'testtb-testEmptyExportFileSystemState,,1732754642104.93b1d0c2c2cc82da4cd135609a392dc4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e6eed9b0a2ad5ceac984cd81715712fb, NAME => 'testtb-testEmptyExportFileSystemState,1,1732754642104.e6eed9b0a2ad5ceac984cd81715712fb.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:44:18,198 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
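The cleanup recorded around this point (DisableTableProcedure pid=206, DeleteTableProcedure pid=212, and, a little further below, the delete requests for "emptySnaptb0-testEmptyExportFileSystemState" and "snaptb0-testEmptyExportFileSystemState") is the server-side effect of ordinary client Admin calls. A minimal sketch of the equivalent client-side cleanup, assuming a reachable cluster; the table and snapshot names are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table);   // drives the DisableTableProcedure and region UNASSIGNs seen above
          admin.deleteTable(table);    // drives the DeleteTableProcedure, HFile archiving and META cleanup
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }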
2024-11-28T00:44:18,198 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754658198"}]},"ts":"9223372036854775807"} 2024-11-28T00:44:18,200 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-28T00:44:18,201 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 342 msec 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,328 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,328 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-11-28T00:44:18,330 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,330 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,330 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,330 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:44:18,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-28T00:44:18,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:18,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-28T00:44:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-28T00:44:18,367 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=812 (was 809) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1535280759_1 at /127.0.0.1:60960 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1535280759_1 at 
/127.0.0.1:48332 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39253 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:60978 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:35415 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:48344 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7342 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39253 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:43418 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 147043) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=666 (was 620) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 18) - ProcessCount LEAK? 
-, AvailableMemoryMB=2111 (was 3612) 2024-11-28T00:44:18,367 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-11-28T00:44:18,386 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=812, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=666, ProcessCount=24, AvailableMemoryMB=2109 2024-11-28T00:44:18,386 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-11-28T00:44:18,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:44:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:44:18,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:44:18,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-11-28T00:44:18,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-28T00:44:18,391 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:44:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:44:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:44:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:44:18,408 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ff027b3be0ce05f3c83052bcab03d6f5, NAME => 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:18,408 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fc518652ec3b7fd6e62c4676cbff2f68, NAME => 'testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:44:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:44:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:44:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:44:18,425 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:18,425 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing ff027b3be0ce05f3c83052bcab03d6f5, disabling compactions & flushes 2024-11-28T00:44:18,425 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:18,426 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:18,426 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. after waiting 0 ms 2024-11-28T00:44:18,426 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:18,426 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:44:18,426 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for ff027b3be0ce05f3c83052bcab03d6f5: Waiting for close lock at 1732754658425Disabling compacts and flushes for region at 1732754658425Disabling writes for close at 1732754658426 (+1 ms)Writing region close event to WAL at 1732754658426Closed at 1732754658426 2024-11-28T00:44:18,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:44:18,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing fc518652ec3b7fd6e62c4676cbff2f68, disabling compactions & flushes 2024-11-28T00:44:18,429 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. after waiting 0 ms 2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,429 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 
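The create-table request above ('testtb-testExportWithChecksum' with IS_MOB => 'true' and MOB_THRESHOLD => '0') forces every cell of family 'cf' into MOB storage, which is what the MOB export tests exercise. A minimal sketch of building an equivalent table through the client API; the single split key that yields the two regions ('' to '1' and '1' to '') is inferred from the region boundaries in the log, and the connection setup is an assumption for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)     // IS_MOB => 'true'
                  .setMobThreshold(0L)     // MOB_THRESHOLD => '0': every value goes to a MOB file
                  .setMaxVersions(1)       // VERSIONS => '1'
                  .build());
          // One split key gives the two regions ('' - '1' and '1' - '') seen in the log.
          admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }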
2024-11-28T00:44:18,429 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for fc518652ec3b7fd6e62c4676cbff2f68: Waiting for close lock at 1732754658429Disabling compacts and flushes for region at 1732754658429Disabling writes for close at 1732754658429Writing region close event to WAL at 1732754658429Closed at 1732754658429 2024-11-28T00:44:18,431 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:44:18,431 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754658431"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754658431"}]},"ts":"1732754658431"} 2024-11-28T00:44:18,431 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732754658431"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754658431"}]},"ts":"1732754658431"} 2024-11-28T00:44:18,434 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:44:18,435 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:44:18,435 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754658435"}]},"ts":"1732754658435"} 2024-11-28T00:44:18,437 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-28T00:44:18,438 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:44:18,439 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:44:18,439 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:44:18,439 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:44:18,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:44:18,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, ASSIGN}] 2024-11-28T00:44:18,441 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, ASSIGN 2024-11-28T00:44:18,441 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, ASSIGN 2024-11-28T00:44:18,442 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, ASSIGN; state=OFFLINE, location=97f25515df69,43795,1732754369084; forceNewPlan=false, retain=false 2024-11-28T00:44:18,442 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:44:18,444 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-28T00:44:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-28T00:44:18,592 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:44:18,593 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=fc518652ec3b7fd6e62c4676cbff2f68, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:18,593 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=OPENING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:44:18,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, ASSIGN because future has completed 2024-11-28T00:44:18,595 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:44:18,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, ASSIGN because future has completed 2024-11-28T00:44:18,596 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:44:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-28T00:44:18,750 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:18,750 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => ff027b3be0ce05f3c83052bcab03d6f5, NAME => 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:44:18,750 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. service=AccessControlService 2024-11-28T00:44:18,750 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:44:18,750 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,750 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:18,751 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,751 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,751 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,751 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => fc518652ec3b7fd6e62c4676cbff2f68, NAME => 'testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:44:18,751 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. service=AccessControlService 2024-11-28T00:44:18,751 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:44:18,752 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,752 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:18,752 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,752 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,752 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,753 INFO [StoreOpener-fc518652ec3b7fd6e62c4676cbff2f68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,753 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff027b3be0ce05f3c83052bcab03d6f5 columnFamilyName cf 2024-11-28T00:44:18,754 INFO [StoreOpener-fc518652ec3b7fd6e62c4676cbff2f68-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fc518652ec3b7fd6e62c4676cbff2f68 columnFamilyName cf 2024-11-28T00:44:18,754 DEBUG [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:18,754 DEBUG [StoreOpener-fc518652ec3b7fd6e62c4676cbff2f68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:18,754 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] regionserver.HStore(327): Store=ff027b3be0ce05f3c83052bcab03d6f5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:44:18,754 INFO [StoreOpener-fc518652ec3b7fd6e62c4676cbff2f68-1 {}] regionserver.HStore(327): Store=fc518652ec3b7fd6e62c4676cbff2f68/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:44:18,754 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,754 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,755 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,755 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,755 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,755 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,764 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,765 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,765 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,765 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,766 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,766 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,768 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:44:18,768 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:44:18,768 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened fc518652ec3b7fd6e62c4676cbff2f68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72019423, jitterRate=0.07317303121089935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:44:18,768 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened ff027b3be0ce05f3c83052bcab03d6f5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67426229, jitterRate=0.004729107022285461}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:44:18,768 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:18,768 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:18,769 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for ff027b3be0ce05f3c83052bcab03d6f5: Running coprocessor pre-open hook at 1732754658751Writing region info on filesystem at 1732754658751Initializing all the Stores at 1732754658751Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754658751Cleaning up temporary data from old regions at 1732754658765 (+14 ms)Running coprocessor post-open hooks at 1732754658768 (+3 ms)Region opened successfully at 1732754658769 (+1 ms) 2024-11-28T00:44:18,769 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for fc518652ec3b7fd6e62c4676cbff2f68: Running coprocessor pre-open hook at 1732754658752Writing region info on filesystem 
at 1732754658752Initializing all the Stores at 1732754658752Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754658752Cleaning up temporary data from old regions at 1732754658765 (+13 ms)Running coprocessor post-open hooks at 1732754658768 (+3 ms)Region opened successfully at 1732754658769 (+1 ms) 2024-11-28T00:44:18,769 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5., pid=217, masterSystemTime=1732754658747 2024-11-28T00:44:18,770 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68., pid=216, masterSystemTime=1732754658747 2024-11-28T00:44:18,771 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,771 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:18,772 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=fc518652ec3b7fd6e62c4676cbff2f68, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:44:18,772 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:18,772 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:44:18,773 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:44:18,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:44:18,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:44:18,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-11-28T00:44:18,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169 in 181 msec 2024-11-28T00:44:18,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-11-28T00:44:18,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, ASSIGN in 338 msec 2024-11-28T00:44:18,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084 in 181 msec 2024-11-28T00:44:18,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-11-28T00:44:18,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, ASSIGN in 340 msec 2024-11-28T00:44:18,783 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:44:18,783 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754658783"}]},"ts":"1732754658783"} 2024-11-28T00:44:18,785 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-28T00:44:18,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:44:18,787 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-28T00:44:18,790 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-11-28T00:44:18,874 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,889 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,889 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:44:18,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 500 msec 2024-11-28T00:44:19,027 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-28T00:44:19,028 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:44:19,028 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-28T00:44:19,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:19,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:44:19,032 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,036 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,041 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,044 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754659044 (current time:1732754659044). 
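The records above show the master finishing CreateTableProcedure pid=213 for testtb-testExportWithChecksum (two regions split at '1', MOB-enabled family 'cf'), the client's RawAsyncHBaseAdmin reporting the CREATE operation complete, and an immediate FLUSH-type snapshot request for emptySnaptb0-testExportWithChecksum. A minimal client-side sketch that would drive this sequence, assuming the standard HBase Admin API; the table, family, and snapshot names are taken from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");

          // One split key ('1') yields the two regions seen in the open-region records:
          // [ '' , '1' ) and [ '1' , '' ).
          byte[][] splits = { Bytes.toBytes("1") };

          // Family 'cf' with MOB enabled, mirroring IS_MOB => 'true', MOB_THRESHOLD => '0'
          // in the region open journal above.
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMobEnabled(true)
                      .setMobThreshold(0)
                      .build())
                  .build(),
              splits);

          // Admin.snapshot(name, table) takes a FLUSH-type snapshot of an enabled table,
          // matching "type=FLUSH ttl=0" in the master's snapshot request record.
          admin.snapshot("emptySnaptb0-testExportWithChecksum", table);
        }
      }
    }
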
2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e8213ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:19,045 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:19,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:19,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@481c1d3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,047 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54634, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:19,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5afafa6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:19,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:19,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:19,049 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33812, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:19,050 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:44:19,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:19,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,050 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:44:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad44d3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:19,052 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30c974b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:19,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,053 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54658, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:19,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13590caa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:19,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:19,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:19,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33824, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:44:19,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:44:19,058 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 2024-11-28T00:44:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,058 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
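The two call stacks above come from SnapshotDescriptionUtils.validate: isSecurityAvailable checks that the hbase:acl table is present, and writeAclToSnapshotDescription reads the table's permissions (the "jenkins: RWXCA" entry) through PermissionStorage so they can be stored with the snapshot. In this run the owner permission was written automatically by the AccessController during table creation; purely for reference, an explicit client-side grant producing the same RWXCA entry would look roughly like the following sketch, assuming the standard AccessControlClient API (user name and actions taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Grant READ, WRITE, EXEC, CREATE, ADMIN (the "RWXCA" shown in the acl records)
          // on the whole table (null family/qualifier) to user "jenkins".
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportWithChecksum"),
              "jenkins",
              null,   // family: null = all families
              null,   // qualifier: null = all qualifiers
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }
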
2024-11-28T00:44:19,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-28T00:44:19,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:44:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:44:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-28T00:44:19,061 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:44:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-28T00:44:19,062 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:44:19,064 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:44:19,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:44:19,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:44:19,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:44:19,070 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:44:19,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68}] 2024-11-28T00:44:19,070 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, 
ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,071 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-28T00:44:19,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-11-28T00:44:19,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for fc518652ec3b7fd6e62c4676cbff2f68: 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for ff027b3be0ce05f3c83052bcab03d6f5: 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. for emptySnaptb0-testExportWithChecksum completed. 2024-11-28T00:44:19,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. for emptySnaptb0-testExportWithChecksum completed. 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:44:19,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:44:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:44:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:44:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:44:19,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:19,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-11-28T00:44:19,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-11-28T00:44:19,229 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,229 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 in 160 msec 2024-11-28T00:44:19,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:44:19,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:44:19,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:44:19,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 
2024-11-28T00:44:19,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-11-28T00:44:19,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-11-28T00:44:19,234 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,235 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-11-28T00:44:19,237 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:44:19,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 in 165 msec 2024-11-28T00:44:19,238 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:44:19,239 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:44:19,239 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:44:19,239 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:19,239 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:44:19,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:44:19,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:44:19,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:44:19,245 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:44:19,245 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-28T00:44:19,245 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-28T00:44:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:44:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:44:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:44:19,256 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:44:19,260 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:44:19,260 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-28T00:44:19,262 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:44:19,262 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-28T00:44:19,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 203 msec 2024-11-28T00:44:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-28T00:44:19,378 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:44:19,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43795 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:44:19,386 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:44:19,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,390 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-28T00:44:19,390 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
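
[Illustrative sketch, not part of the captured log] The "Found 2 regions for table testtb-testExportWithChecksum" and "firstRegionName=..." entries above come from the test utility enumerating the table's regions in hbase:meta. A minimal client-side sketch of the same lookup is below; the table name is taken from the log, while the configuration source (an hbase-site.xml on the classpath) and class name are assumptions for illustration.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ListTableRegions {
  public static void main(String[] args) throws IOException {
    // Assumes the cluster's hbase-site.xml is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location pairs a region (start/end key, encoded name) with its hosting region server.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for table " + table);
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}
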
2024-11-28T00:44:19,390 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:44:19,392 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,398 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,405 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:44:19,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:44:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754659408 (current time:1732754659408). 2024-11-28T00:44:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:44:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-28T00:44:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:44:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27e1d973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:19,409 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e49737e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
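
[Illustrative sketch, not part of the captured log] The master entry above, "snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }", is the shape of request produced by the client Admin API for a flush snapshot. A minimal sketch of issuing such a request follows; snapshot and table names are taken from the log, everything else (configuration, class name) is illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeFlushSnapshot {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot asks region servers to flush memstores first,
      // then records references to the resulting store files.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH));
      // The synchronous snapshot(...) call returns once the master's
      // SnapshotProcedure (the pid=221 state machine logged here) completes.
    }
  }
}
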
2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,411 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54666, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:19,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e3c1df6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:19,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:19,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:19,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:19,415 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:44:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,415 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:44:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bfb89b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:44:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:44:19,417 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:44:19,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:44:19,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:44:19,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4df10f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:44:19,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:44:19,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,418 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54686, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:44:19,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@108ad6fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:44:19,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:44:19,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1] 2024-11-28T00:44:19,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:19,421 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33840, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:19,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:44:19,425 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:44:19,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:44:19,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:44:19,425 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:44:19,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-28T00:44:19,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
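
[Illustrative sketch, not part of the captured log] The "Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]" entry above reflects the table permissions being read so they can be written into the snapshot description. A hedged sketch of inspecting those same permissions from a client is below, assuming the AccessController coprocessor (and its hbase:acl table, visible in the lookups above) is enabled; the class name is illustrative.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Returns the permissions stored in hbase:acl for tables matching the regex,
      // e.g. the "jenkins: RWXCA" grant seen in the log above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
      perms.forEach(p -> System.out.println(p));
    }
  }
}
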
2024-11-28T00:44:19,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:44:19,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-28T00:44:19,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:44:19,428 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:44:19,429 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:44:19,431 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:44:19,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:44:19,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:44:19,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:44:19,439 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:44:19,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68}] 2024-11-28T00:44:19,440 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,440 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:44:19,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-11-28T00:44:19,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43795 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-11-28T00:44:19,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:44:19,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:19,592 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing ff027b3be0ce05f3c83052bcab03d6f5 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:44:19,592 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing fc518652ec3b7fd6e62c4676cbff2f68 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:44:19,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 is 71, key is 0a5a0d5614f6530efc8b6f3ded66c0bf/cf:q/1732754659384/Put/seqid=0 2024-11-28T00:44:19,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 is 71, key is 1411c25b4a572969284420cbadcb96ce/cf:q/1732754659386/Put/seqid=0 2024-11-28T00:44:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742333_1509 (size=8241) 2024-11-28T00:44:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742333_1509 (size=8241) 2024-11-28T00:44:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742332_1508 (size=5032) 2024-11-28T00:44:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742333_1509 (size=8241) 2024-11-28T00:44:19,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742332_1508 (size=5032) 2024-11-28T00:44:19,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to 
blk_1073742332_1508 (size=5032) 2024-11-28T00:44:19,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:19,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:19,623 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/.tmp/cf/9be64de79a1b41b79e1a68311c2a46d2, store: [table=testtb-testExportWithChecksum family=cf region=ff027b3be0ce05f3c83052bcab03d6f5] 2024-11-28T00:44:19,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/.tmp/cf/9be64de79a1b41b79e1a68311c2a46d2 is 206, key is 058b1f13d7e89d7e48b148e69f0f80e85/cf:q/1732754659384/Put/seqid=0 2024-11-28T00:44:19,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/.tmp/cf/8f6ae53e4ebb48dc96d22ffe315dcd34, store: [table=testtb-testExportWithChecksum family=cf region=fc518652ec3b7fd6e62c4676cbff2f68] 2024-11-28T00:44:19,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/.tmp/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 is 206, key is 1e55c599b0ad89bfb39e83bed52269399/cf:q/1732754659386/Put/seqid=0 2024-11-28T00:44:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:44:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:44:19,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:44:19,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/.tmp/cf/9be64de79a1b41b79e1a68311c2a46d2 2024-11-28T00:44:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742335_1511 (size=15055) 2024-11-28T00:44:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742335_1511 (size=15055) 2024-11-28T00:44:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742335_1511 (size=15055) 2024-11-28T00:44:19,644 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/.tmp/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 2024-11-28T00:44:19,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/.tmp/cf/9be64de79a1b41b79e1a68311c2a46d2 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 2024-11-28T00:44:19,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/.tmp/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 2024-11-28T00:44:19,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34, entries=48, sequenceid=6, filesize=14.7 K 2024-11-28T00:44:19,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:44:19,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for fc518652ec3b7fd6e62c4676cbff2f68 in 62ms, sequenceid=6, compaction requested=false 2024-11-28T00:44:19,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for ff027b3be0ce05f3c83052bcab03d6f5 in 62ms, sequenceid=6, compaction requested=false 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for fc518652ec3b7fd6e62c4676cbff2f68: 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for ff027b3be0ce05f3c83052bcab03d6f5: 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. for snaptb0-testExportWithChecksum completed. 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. for snaptb0-testExportWithChecksum completed. 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34] hfiles 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:44:19,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2] hfiles 2024-11-28T00:44:19,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:44:19,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:44:19,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:44:19,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:44:19,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:44:19,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-11-28T00:44:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-11-28T00:44:19,663 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,663 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5 in 225 msec 2024-11-28T00:44:19,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:44:19,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:44:19,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:44:19,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 
2024-11-28T00:44:19,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-11-28T00:44:19,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-11-28T00:44:19,668 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,669 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-11-28T00:44:19,672 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:44:19,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68 in 231 msec 2024-11-28T00:44:19,673 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:44:19,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
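
[Illustrative sketch, not part of the captured log] The flushes above go through DefaultMobStoreFlusher and rename files into mobdir/.../079394da860334b7f5313f35a50f5bc6/cf/, and the snapshot adds a dedicated MOB region, which implies the 'cf' family is MOB-enabled. A minimal sketch of how such a family is typically declared at table-creation time follows; the 100 KB threshold is an assumed value for illustration, not taken from this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells in 'cf' larger than the MOB threshold are written to separate MOB hfiles
      // under the mob directory, which is why the flushes above go through the MOB path.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(102400L) // assumed 100 KB threshold, for illustration only
              .build())
          .build());
    }
  }
}
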
2024-11-28T00:44:19,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:44:19,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:19,675 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5] hfiles 2024-11-28T00:44:19,675 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:44:19,675 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:19,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:44:19,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:44:19,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:44:19,688 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:44:19,688 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-28T00:44:19,689 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:44:19,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:44:19,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:44:19,702 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:44:19,705 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:44:19,712 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:44:19,712 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:44:19,714 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:44:19,714 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-28T00:44:19,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 288 msec 2024-11-28T00:44:19,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:44:19,748 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:44:19,749 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748 2024-11-28T00:44:19,749 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:44:19,791 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 
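
[Illustrative sketch, not part of the captured log] The entries above show ExportSnapshot preparing to copy snaptb0-testExportWithChecksum from HDFS (inputRoot) to a local file:/// target. A hedged sketch of driving the same tool programmatically is below, assuming ExportSnapshot can be run through Hadoop's ToolRunner with the documented -snapshot/-copy-to/-mappers options; the local target path is illustrative, not the jenkins workspace path used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot <name> -copy-to <target fs/dir> -mappers <N>
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",   // assumed local target directory
        "-mappers", "2"
    });
    System.exit(exitCode);
  }
}

The long run of mapreduce.TableMapReduceUtil "For class X, using jar Y" DEBUG lines that follows is the export job's setup resolving dependency jars for the distributed cache before the copy mappers are launched.
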
2024-11-28T00:44:19,791 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@17a4db9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:44:19,793 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:44:19,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:44:19,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:19,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:19,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-13254273696999451309.jar 2024-11-28T00:44:20,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-7311230572183640074.jar 2024-11-28T00:44:20,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,920 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:44:20,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:44:20,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:44:20,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:44:20,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:44:20,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:44:20,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:44:20,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:44:20,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:44:20,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:44:20,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:44:20,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:44:20,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:20,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:20,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:20,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:20,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:44:20,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:20,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:44:20,976 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742340_1516 (size=24020) 2024-11-28T00:44:20,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742340_1516 (size=24020) 2024-11-28T00:44:20,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742340_1516 (size=24020) 2024-11-28T00:44:21,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742341_1517 (size=77755) 2024-11-28T00:44:21,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742341_1517 (size=77755) 2024-11-28T00:44:21,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742341_1517 (size=77755) 2024-11-28T00:44:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742342_1518 (size=131360) 2024-11-28T00:44:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742342_1518 (size=131360) 2024-11-28T00:44:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742342_1518 (size=131360) 2024-11-28T00:44:21,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742343_1519 (size=111793) 2024-11-28T00:44:21,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742343_1519 (size=111793) 2024-11-28T00:44:21,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742343_1519 (size=111793) 2024-11-28T00:44:21,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742344_1520 (size=1832290) 2024-11-28T00:44:21,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742344_1520 (size=1832290) 2024-11-28T00:44:21,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742344_1520 (size=1832290) 2024-11-28T00:44:21,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742345_1521 (size=8360005) 2024-11-28T00:44:21,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742345_1521 (size=8360005) 2024-11-28T00:44:21,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742345_1521 (size=8360005) 2024-11-28T00:44:21,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742346_1522 (size=503880) 2024-11-28T00:44:21,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742346_1522 (size=503880) 2024-11-28T00:44:21,100 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742346_1522 (size=503880) 2024-11-28T00:44:21,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742347_1523 (size=322274) 2024-11-28T00:44:21,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742347_1523 (size=322274) 2024-11-28T00:44:21,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742347_1523 (size=322274) 2024-11-28T00:44:21,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742348_1524 (size=20406) 2024-11-28T00:44:21,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742348_1524 (size=20406) 2024-11-28T00:44:21,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742348_1524 (size=20406) 2024-11-28T00:44:21,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742349_1525 (size=6424743) 2024-11-28T00:44:21,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742349_1525 (size=6424743) 2024-11-28T00:44:21,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742349_1525 (size=6424743) 2024-11-28T00:44:21,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742350_1526 (size=45609) 2024-11-28T00:44:21,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742350_1526 (size=45609) 2024-11-28T00:44:21,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742350_1526 (size=45609) 2024-11-28T00:44:21,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742351_1527 (size=136454) 2024-11-28T00:44:21,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742351_1527 (size=136454) 2024-11-28T00:44:21,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742351_1527 (size=136454) 2024-11-28T00:44:21,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742352_1528 (size=1597136) 2024-11-28T00:44:21,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742352_1528 (size=1597136) 2024-11-28T00:44:21,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742352_1528 (size=1597136) 2024-11-28T00:44:21,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742353_1529 (size=30873) 2024-11-28T00:44:21,206 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742353_1529 (size=30873) 2024-11-28T00:44:21,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742353_1529 (size=30873) 2024-11-28T00:44:21,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742354_1530 (size=29229) 2024-11-28T00:44:21,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742354_1530 (size=29229) 2024-11-28T00:44:21,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742354_1530 (size=29229) 2024-11-28T00:44:21,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742355_1531 (size=903861) 2024-11-28T00:44:21,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742355_1531 (size=903861) 2024-11-28T00:44:21,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742355_1531 (size=903861) 2024-11-28T00:44:21,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742356_1532 (size=5175431) 2024-11-28T00:44:21,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742356_1532 (size=5175431) 2024-11-28T00:44:21,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742356_1532 (size=5175431) 2024-11-28T00:44:21,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742357_1533 (size=232881) 2024-11-28T00:44:21,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742357_1533 (size=232881) 2024-11-28T00:44:21,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742357_1533 (size=232881) 2024-11-28T00:44:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742358_1534 (size=440956) 2024-11-28T00:44:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742358_1534 (size=440956) 2024-11-28T00:44:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742358_1534 (size=440956) 2024-11-28T00:44:21,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742359_1535 (size=1323991) 2024-11-28T00:44:21,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742359_1535 (size=1323991) 2024-11-28T00:44:21,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742359_1535 (size=1323991) 
2024-11-28T00:44:21,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742360_1536 (size=4695811) 2024-11-28T00:44:21,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742360_1536 (size=4695811) 2024-11-28T00:44:21,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742360_1536 (size=4695811) 2024-11-28T00:44:21,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742361_1537 (size=1877034) 2024-11-28T00:44:21,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742361_1537 (size=1877034) 2024-11-28T00:44:21,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742361_1537 (size=1877034) 2024-11-28T00:44:21,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742362_1538 (size=217555) 2024-11-28T00:44:21,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742362_1538 (size=217555) 2024-11-28T00:44:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742362_1538 (size=217555) 2024-11-28T00:44:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742363_1539 (size=4188619) 2024-11-28T00:44:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742363_1539 (size=4188619) 2024-11-28T00:44:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742363_1539 (size=4188619) 2024-11-28T00:44:21,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742364_1540 (size=127628) 2024-11-28T00:44:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742364_1540 (size=127628) 2024-11-28T00:44:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742364_1540 (size=127628) 2024-11-28T00:44:21,745 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
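The TableMapReduceUtil(972) lines above map each dependency class to the jar (or target/classes directory) that will be shipped with the MapReduce job. As a rough illustration of how a class can be traced back to the classpath entry that provides it, here is a minimal Java sketch; it is not the actual TableMapReduceUtil code, and the class and file names in it are placeholders.

    import java.net.URL;

    // Minimal sketch (not HBase code): find the classpath entry that provides a class,
    // in the spirit of the "For class X, using jar Y" lines above.
    public final class FindContainingJar {
        static String containingJar(Class<?> clazz) {
            String resource = clazz.getName().replace('.', '/') + ".class";
            ClassLoader cl = clazz.getClassLoader();
            URL url = (cl != null) ? cl.getResource(resource) : ClassLoader.getSystemResource(resource);
            if (url == null) {
                return null;
            }
            String s = url.toString();
            if (s.startsWith("jar:") && s.contains("!")) {
                // jar:file:/path/to/foo.jar!/com/example/Foo.class -> /path/to/foo.jar
                return s.substring("jar:file:".length(), s.indexOf('!'));
            }
            return s; // a directory on the classpath, e.g. .../target/classes
        }

        public static void main(String[] args) {
            System.out.println(containingJar(FindContainingJar.class));
        }
    }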
2024-11-28T00:44:21,746 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-28T00:44:21,748 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:44:21,748 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:44:21,748 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:44:21,748 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:44:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:44:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:44:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:44:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:44:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:44:21,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:44:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742367_1543 (size=304125) 2024-11-28T00:44:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742367_1543 (size=304125) 2024-11-28T00:44:21,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742367_1543 (size=304125) 2024-11-28T00:44:22,421 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:44:22,421 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:44:22,427 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0008_000001 (auth:SIMPLE) from 127.0.0.1:55096 2024-11-28T00:44:22,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0008/container_1732754375741_0008_01_000001/launch_container.sh] 2024-11-28T00:44:22,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0008/container_1732754375741_0008_01_000001/container_tokens] 2024-11-28T00:44:22,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0008/container_1732754375741_0008_01_000001/sysfs] 2024-11-28T00:44:23,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:43038 2024-11-28T00:44:23,206 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:44:26,881 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
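The ExportSnapshot(763) lines above show the snapshot's hfiles grouped into four export splits of 14.7 K, 8.0 K, 5.6 K and 4.9 K. One common way to spread files of uneven size over a fixed number of map splits is a greedy "largest file to the currently lightest split" heuristic; the sketch below only illustrates that idea with made-up sizes and is not the actual ExportSnapshot splitting code.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Illustrative only: greedily balance file sizes across a fixed number of splits.
    public final class BalancedSplits {
        static List<List<Long>> split(List<Long> fileSizes, int numSplits) {
            List<Long> sorted = new ArrayList<>(fileSizes);
            sorted.sort(Comparator.reverseOrder()); // largest files first

            // Min-heap of {current split size, split index}.
            PriorityQueue<long[]> heap = new PriorityQueue<>(Comparator.comparingLong((long[] a) -> a[0]));
            List<List<Long>> splits = new ArrayList<>();
            for (int i = 0; i < numSplits; i++) {
                splits.add(new ArrayList<>());
                heap.add(new long[] { 0L, i });
            }
            for (long size : sorted) {
                long[] lightest = heap.poll();   // split with the smallest total so far
                splits.get((int) lightest[1]).add(size);
                lightest[0] += size;
                heap.add(lightest);
            }
            return splits;
        }

        public static void main(String[] args) {
            // Hypothetical HFile sizes in bytes; real sizes come from the snapshot manifest.
            List<Long> sizes = List.of(15000L, 8200L, 5700L, 5000L, 3100L, 2400L);
            split(sizes, 4).forEach(System.out::println);
        }
    }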
2024-11-28T00:44:28,441 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-28T00:44:28,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-28T00:44:29,052 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-28T00:44:29,122 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-28T00:44:29,204 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=30, evicted=0, evictedPerRun=0.0 2024-11-28T00:44:29,308 DEBUG [master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-28T00:44:29,309 DEBUG [master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-28T00:44:30,362 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:56208 2024-11-28T00:44:30,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742368_1544 (size=349823) 2024-11-28T00:44:30,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742368_1544 (size=349823) 2024-11-28T00:44:30,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742368_1544 (size=349823) 2024-11-28T00:44:31,001 INFO [regionserver/97f25515df69:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:44:31,006 INFO [regionserver/97f25515df69:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:44:31,026 INFO [regionserver/97f25515df69:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:44:32,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:44026 2024-11-28T00:44:32,704 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 
(auth:SIMPLE) from 127.0.0.1:46336 2024-11-28T00:44:32,977 INFO [regionserver/97f25515df69:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 172099 ms 2024-11-28T00:44:33,495 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:46338 2024-11-28T00:44:33,510 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:44028 2024-11-28T00:44:33,572 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region af0adc78f2023f6d163d04caaa3516c7 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:44:33,573 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region fc518652ec3b7fd6e62c4676cbff2f68 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:44:33,573 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a074d787aab5a43a48b3164bc3a7256f changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:44:33,573 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ff027b3be0ce05f3c83052bcab03d6f5 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:44:33,576 DEBUG [master/97f25515df69:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-28T00:44:33,577 INFO [master/97f25515df69:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-28T00:44:33,577 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-11-28T00:44:33,577 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 4 regions 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:44:33,578 INFO [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:44:33,578 INFO [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:44:33,578 INFO [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:44:33,578 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-11-28T00:44:33,582 INFO [master/97f25515df69:0.Chore.1 {}] balancer.StochasticLoadBalancer(370): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); 2024-11-28T00:44:33,582 INFO [master/97f25515df69:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.4947970366745957, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); computedMaxSteps=14400 2024-11-28T00:44:33,860 INFO [master/97f25515df69:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 281 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.4947970366745957 to a new imbalance of 0.017672622564662982. 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.821074384418005, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8577153282654146, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-28T00:44:33,867 INFO [master/97f25515df69:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number of regions in transition is 6 2024-11-28T00:44:33,867 INFO [master/97f25515df69:0.Chore.1 {}] master.HMaster(2172): balance hri=1588230740, source=97f25515df69,43795,1732754369084, destination=97f25515df69,38585,1732754368938 2024-11-28T00:44:33,873 DEBUG [master/97f25515df69:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-28T00:44:33,874 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-28T00:44:33,875 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:44:33,877 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 97f25515df69,43795,1732754369084, state=CLOSING 2024-11-28T00:44:33,918 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=97f25515df69,43795,1732754369084, table=hbase:meta, region=1588230740. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-28T00:44:34,019 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=97f25515df69,43795,1732754369084, table=hbase:meta, region=1588230740. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
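The StochasticLoadBalancer lines above list each cost function with its multiplier and imbalance, then report an initial weighted average imbalance of 0.4947970366745957. That figure is consistent with a plain weighted average, sum(multiplier x imbalance) / sum(multiplier), over the functions that show a multiplier; the snippet below is only a back-of-the-envelope check of that reading, not the balancer's code.

    // Back-of-the-envelope check of the "initial weighted average imbalance" reported above.
    public final class WeightedImbalance {
        public static void main(String[] args) {
            double[][] costs = {
                { 500.0, 0.5773502691896258 },  // RegionCountSkewCostFunction
                { 7.0,   0.0 },                 // MoveCostFunction
                { 25.0,  0.0 },                 // ServerLocalityCostFunction
                { 15.0,  0.0 },                 // RackLocalityCostFunction
                { 35.0,  0.0 },                 // TableSkewCostFunction
                { 5.0,   1.0 },                 // ReadRequestCostFunction
                { 5.0,   0.0 },                 // CPRequestCostFunction
                { 5.0,   1.0 },                 // WriteRequestCostFunction
                { 5.0,   0.0 },                 // MemStoreSizeCostFunction
                { 5.0,   0.33333333333333337 }  // StoreFileCostFunction
            };
            double weighted = 0, total = 0;
            for (double[] c : costs) {
                weighted += c[0] * c[1];
                total += c[0];
            }
            System.out.println(weighted / total); // ~0.49480, matching the log line
        }
    }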
2024-11-28T00:44:34,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,099 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,099 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-28T00:44:34,099 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:34,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:44:34,255 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(122): Close 1588230740 2024-11-28T00:44:34,256 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:34,256 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-28T00:44:34,256 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-28T00:44:34,256 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-28T00:44:34,256 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1843): Acquired close lock on 
hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T00:44:34,256 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T00:44:34,256 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=76.94 KB heapSize=121.80 KB 2024-11-28T00:44:34,291 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/caedc12e03cd44a288b8d3c3170ec0d2 is 181, key is testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68./info:regioninfo/1732754658772/Put/seqid=0 2024-11-28T00:44:34,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742369_1545 (size=17550) 2024-11-28T00:44:34,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742369_1545 (size=17550) 2024-11-28T00:44:34,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742369_1545 (size=17550) 2024-11-28T00:44:34,312 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/caedc12e03cd44a288b8d3c3170ec0d2 2024-11-28T00:44:34,338 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/de3a77db88f142c283e6cd4d3b702681 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48./ns:/1732754628045/DeleteFamily/seqid=0 2024-11-28T00:44:34,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742370_1546 (size=7924) 2024-11-28T00:44:34,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742370_1546 (size=7924) 2024-11-28T00:44:34,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742370_1546 (size=7924) 2024-11-28T00:44:34,344 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/de3a77db88f142c283e6cd4d3b702681 2024-11-28T00:44:34,365 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8 is 133, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48./rep_barrier:/1732754628045/DeleteFamily/seqid=0 2024-11-28T00:44:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742371_1547 (size=8195) 2024-11-28T00:44:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742371_1547 (size=8195) 2024-11-28T00:44:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742371_1547 (size=8195) 2024-11-28T00:44:34,371 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8 2024-11-28T00:44:34,413 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/fd6da1531c3b4f448f9a1cb52b7d26b7 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732754608383.d68e3d8f5d5916d06b2877ee6efc9c48./table:/1732754628045/DeleteFamily/seqid=0 2024-11-28T00:44:34,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742372_1548 (size=9051) 2024-11-28T00:44:34,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742372_1548 (size=9051) 2024-11-28T00:44:34,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742372_1548 (size=9051) 2024-11-28T00:44:34,446 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/fd6da1531c3b4f448f9a1cb52b7d26b7 2024-11-28T00:44:34,452 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/caedc12e03cd44a288b8d3c3170ec0d2 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/info/caedc12e03cd44a288b8d3c3170ec0d2 2024-11-28T00:44:34,460 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/info/caedc12e03cd44a288b8d3c3170ec0d2, entries=96, sequenceid=211, filesize=17.1 K 2024-11-28T00:44:34,463 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/de3a77db88f142c283e6cd4d3b702681 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/ns/de3a77db88f142c283e6cd4d3b702681 2024-11-28T00:44:34,473 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/ns/de3a77db88f142c283e6cd4d3b702681, entries=24, sequenceid=211, filesize=7.7 K 2024-11-28T00:44:34,475 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8 2024-11-28T00:44:34,500 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8, entries=22, sequenceid=211, filesize=8.0 K 2024-11-28T00:44:34,502 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/fd6da1531c3b4f448f9a1cb52b7d26b7 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/table/fd6da1531c3b4f448f9a1cb52b7d26b7 2024-11-28T00:44:34,512 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/table/fd6da1531c3b4f448f9a1cb52b7d26b7, entries=39, sequenceid=211, filesize=8.8 K 2024-11-28T00:44:34,513 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 257ms, sequenceid=211, compaction requested=false 2024-11-28T00:44:34,533 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/recovered.edits/214.seqid, newMaxSeqId=214, maxSeqId=1 2024-11-28T00:44:34,534 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:34,534 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:44:34,534 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
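The flush above writes each column family's new HFile into the region's .tmp directory and only then ("Committing ... as ...") moves it into the family directory, so readers never see a half-written file. That write-to-temp-then-rename pattern can be sketched with the plain Hadoop FileSystem API as below; the paths are placeholders and this is an illustration of the pattern visible in the log, not HRegionFileSystem itself.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustration of the .tmp -> family-directory commit step seen in the log (placeholder paths).
    public final class CommitFlushedFile {
        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());

            Path tmpFile = new Path("/hbase/data/default/t1/region1/.tmp/info/flushed-hfile");
            Path dest = new Path("/hbase/data/default/t1/region1/info/flushed-hfile");

            // Writers only ever target .tmp; a completed file is then renamed into place
            // (a single HDFS rename, so the destination appears all at once).
            fs.mkdirs(dest.getParent());
            if (!fs.rename(tmpFile, dest)) {
                throw new IOException("Failed to commit " + tmpFile + " to " + dest);
            }
        }
    }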
2024-11-28T00:44:34,534 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732754674256Running coprocessor pre-close hooks at 1732754674256Disabling compacts and flushes for region at 1732754674256Disabling writes for close at 1732754674256Obtaining lock to block concurrent updates at 1732754674256Preparing flush snapshotting stores in 1588230740 at 1732754674256Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=78789, getHeapSize=124664, getOffHeapSize=0, getCellsCount=595 at 1732754674257 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732754674260 (+3 ms)Flushing 1588230740/info: creating writer at 1732754674260Flushing 1588230740/info: appending metadata at 1732754674290 (+30 ms)Flushing 1588230740/info: closing flushed file at 1732754674290Flushing 1588230740/ns: creating writer at 1732754674317 (+27 ms)Flushing 1588230740/ns: appending metadata at 1732754674337 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732754674337Flushing 1588230740/rep_barrier: creating writer at 1732754674350 (+13 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732754674364 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732754674364Flushing 1588230740/table: creating writer at 1732754674385 (+21 ms)Flushing 1588230740/table: appending metadata at 1732754674411 (+26 ms)Flushing 1588230740/table: closing flushed file at 1732754674411Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ba14148: reopening flushed file at 1732754674451 (+40 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@341107cc: reopening flushed file at 1732754674460 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e9eabf2: reopening flushed file at 1732754674474 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3682f8b4: reopening flushed file at 1732754674500 (+26 ms)Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 257ms, sequenceid=211, compaction requested=false at 1732754674513 (+13 ms)Writing region close event to WAL at 1732754674518 (+5 ms)Running coprocessor post-close hooks at 1732754674534 (+16 ms)Closed at 1732754674534 2024-11-28T00:44:34,535 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionServer(3302): Adding 1588230740 move to 97f25515df69,38585,1732754368938 record at close sequenceid=211 2024-11-28T00:44:34,538 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(157): Closed 1588230740 2024-11-28T00:44:34,539 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSED 2024-11-28T00:44:34,542 WARN [PEWorker-3 {}] zookeeper.MetaTableLocator(168): Tried to set null ServerName in hbase:meta; skipping -- ServerName required 2024-11-28T00:44:34,542 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=225, ppid=224, state=RUNNABLE, hasLock=true; CloseRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084 2024-11-28T00:44:34,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-11-28T00:44:34,550 
INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 1588230740, server=97f25515df69,43795,1732754369084 in 444 msec 2024-11-28T00:44:34,551 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE; state=CLOSED, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:44:34,702 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:44:34,703 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:34,704 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 97f25515df69,38585,1732754368938, state=OPENING 2024-11-28T00:44:34,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,890 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:34,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:34,892 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-28T00:44:34,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:44:34,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:35,062 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-28T00:44:35,062 INFO 
[RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:44:35,063 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-28T00:44:35,065 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=97f25515df69%2C38585%2C1732754368938.meta, suffix=.meta, logDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938, archiveDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs, maxLogs=32 2024-11-28T00:44:35,109 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938/97f25515df69%2C38585%2C1732754368938.meta.1732754675067.meta, exclude list is [], retry=0 2024-11-28T00:44:35,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38327,DS-2532e3ab-f4be-4df1-b6e1-47d70b0c33f2,DISK] 2024-11-28T00:44:35,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45777,DS-56b9181d-a8cf-4fc2-96de-125d1cb09aa3,DISK] 2024-11-28T00:44:35,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46245,DS-9a7ec647-91e4-4d14-be05-a67d2857cf91,DISK] 2024-11-28T00:44:35,150 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/WALs/97f25515df69,38585,1732754368938/97f25515df69%2C38585%2C1732754368938.meta.1732754675067.meta 2024-11-28T00:44:35,150 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42575:42575),(127.0.0.1/127.0.0.1:33685:33685),(127.0.0.1/127.0.0.1:40703:40703)] 2024-11-28T00:44:35,151 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:44:35,151 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-28T00:44:35,152 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
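A little earlier in this stretch, the ZKWatcher(609) lines show every region server and the master receiving a NodeDataChanged event for /hbase/meta-region-server and refreshing their cached meta location. A bare-bones version of that watch-and-refresh loop, written against the stock ZooKeeper client API, might look like the sketch below; it is a simplified stand-in for ZKWatcher/MetaRegionLocationCache, and the quorum address is a placeholder.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    // Simplified stand-in for the meta-location watcher seen in the log (placeholder quorum).
    public final class MetaLocationWatcher implements Watcher {
        private static final String ZNODE = "/hbase/meta-region-server";
        private final ZooKeeper zk;

        MetaLocationWatcher(String quorum) throws Exception {
            // The connection-level watcher is a no-op in this sketch.
            this.zk = new ZooKeeper(quorum, 30000, event -> { });
            refresh(); // read the current location and install the data watch
        }

        private void refresh() throws Exception {
            Stat stat = new Stat();
            byte[] data = zk.getData(ZNODE, this, stat); // passing `this` re-registers the watch
            System.out.println("meta znode version " + stat.getVersion() + ", " + data.length + " bytes");
        }

        @Override
        public void process(WatchedEvent event) {
            // ZooKeeper watches are one-shot: on NodeDataChanged, re-read and re-register.
            if (event.getType() == Event.EventType.NodeDataChanged && ZNODE.equals(event.getPath())) {
                try {
                    refresh();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        public static void main(String[] args) throws Exception {
            new MetaLocationWatcher("127.0.0.1:2181");
            Thread.sleep(Long.MAX_VALUE); // keep the session alive and keep watching
        }
    }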
2024-11-28T00:44:35,152 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-28T00:44:35,152 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-28T00:44:35,152 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-28T00:44:35,153 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-28T00:44:35,153 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:35,153 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-28T00:44:35,153 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-28T00:44:35,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T00:44:35,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T00:44:35,157 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:35,174 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/info/caedc12e03cd44a288b8d3c3170ec0d2 2024-11-28T00:44:35,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:44:35,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-28T00:44:35,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-28T00:44:35,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:35,196 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/ns/de3a77db88f142c283e6cd4d3b702681 2024-11-28T00:44:35,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:44:35,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T00:44:35,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T00:44:35,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:35,222 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/rep_barrier/ed4075257e6c43dda2fc71fc7a2866d8 2024-11-28T00:44:35,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:44:35,222 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T00:44:35,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T00:44:35,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:35,239 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/table/fd6da1531c3b4f448f9a1cb52b7d26b7 2024-11-28T00:44:35,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:44:35,240 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-28T00:44:35,241 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:44:35,248 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740 2024-11-28T00:44:35,251 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-28T00:44:35,251 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-28T00:44:35,252 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-28T00:44:35,253 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-28T00:44:35,254 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=215; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71649408, jitterRate=0.06765937805175781}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:44:35,254 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-28T00:44:35,255 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732754675153Writing region info on filesystem at 1732754675153Initializing all the Stores at 1732754675154 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754675154Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754675155 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754675155Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732754675155Cleaning up temporary data from old regions at 1732754675251 (+96 ms)Running coprocessor post-open hooks at 1732754675254 (+3 ms)Region opened successfully at 1732754675255 (+1 ms) 2024-11-28T00:44:35,256 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=226, masterSystemTime=1732754675044 2024-11-28T00:44:35,263 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=215, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:35,263 DEBUG [RS_OPEN_META-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-28T00:44:35,263 INFO [RS_OPEN_META-regionserver/97f25515df69:0-0 
{event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-28T00:44:35,264 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 97f25515df69,38585,1732754368938, state=OPEN 2024-11-28T00:44:35,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:35,288 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:35,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:35,288 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:35,289 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:35,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:35,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:44:35,289 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=226, ppid=224, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=97f25515df69,38585,1732754368938 2024-11-28T00:44:35,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:44:35,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-11-28T00:44:35,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=97f25515df69,38585,1732754368938 in 397 msec 2024-11-28T00:44:35,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE in 1.4270 sec 2024-11-28T00:44:35,392 INFO [master/97f25515df69:0.Chore.1 {}] master.HMaster(2172): balance hri=ff027b3be0ce05f3c83052bcab03d6f5, source=97f25515df69,43795,1732754369084, destination=97f25515df69,38585,1732754368938 2024-11-28T00:44:35,394 DEBUG [master/97f25515df69:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE 2024-11-28T00:44:35,394 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE 2024-11-28T00:44:35,396 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=CLOSING, regionLocation=97f25515df69,43795,1732754369084 2024-11-28T00:44:35,400 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] ipc.CallRunner(138): callId: 400 service: ClientService methodName: Mutate size: 306 connection: 172.17.0.3:56977 deadline: 1732754735397, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:44:35,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:44:35,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:44:35,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:44:35,441 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=97f25515df69,43795,1732754369084, table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-28T00:44:35,513 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:44:35,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39157, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:44:35,546 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=97f25515df69,43795,1732754369084, table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-28T00:44:35,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE because future has completed 2024-11-28T00:44:35,553 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:44:35,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084}] 2024-11-28T00:44:35,710 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:35,710 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:44:35,711 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing ff027b3be0ce05f3c83052bcab03d6f5, disabling compactions & flushes 2024-11-28T00:44:35,711 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:35,711 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:35,711 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. after waiting 0 ms 2024-11-28T00:44:35,711 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:35,745 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:44:35,746 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:44:35,746 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:44:35,746 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for ff027b3be0ce05f3c83052bcab03d6f5: Waiting for close lock at 1732754675710Running coprocessor pre-close hooks at 1732754675710Disabling compacts and flushes for region at 1732754675711 (+1 ms)Disabling writes for close at 1732754675711Writing region close event to WAL at 1732754675733 (+22 ms)Running coprocessor post-close hooks at 1732754675746 (+13 ms)Closed at 1732754675746 2024-11-28T00:44:35,747 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionServer(3302): Adding ff027b3be0ce05f3c83052bcab03d6f5 move to 97f25515df69,38585,1732754368938 record at close sequenceid=6 2024-11-28T00:44:35,761 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:35,763 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=CLOSED 2024-11-28T00:44:35,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084 because future has completed 2024-11-28T00:44:35,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-11-28T00:44:35,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,43795,1732754369084 in 220 msec 2024-11-28T00:44:35,778 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE; state=CLOSED, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:44:35,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a074d787aab5a43a48b3164bc3a7256f, had cached 0 bytes from a total of 6086 2024-11-28T00:44:35,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region af0adc78f2023f6d163d04caaa3516c7, had cached 0 bytes from a total of 14465 2024-11-28T00:44:35,929 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-28T00:44:35,929 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:35,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE because future has completed 2024-11-28T00:44:35,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:44:36,094 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:36,094 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7752): Opening region: {ENCODED => ff027b3be0ce05f3c83052bcab03d6f5, NAME => 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:44:36,094 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. service=AccessControlService 2024-11-28T00:44:36,094 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:44:36,094 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,095 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:44:36,095 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7794): checking encryption for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,095 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7797): checking classloading for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,113 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,120 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff027b3be0ce05f3c83052bcab03d6f5 columnFamilyName cf 2024-11-28T00:44:36,122 DEBUG [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:44:36,148 DEBUG [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 2024-11-28T00:44:36,149 INFO [StoreOpener-ff027b3be0ce05f3c83052bcab03d6f5-1 {}] regionserver.HStore(327): Store=ff027b3be0ce05f3c83052bcab03d6f5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:44:36,149 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1038): replaying wal for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,150 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,151 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,152 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1048): stopping wal replay for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,152 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1060): Cleaning up temporary data for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,155 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1093): writing seq id for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,158 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1114): Opened ff027b3be0ce05f3c83052bcab03d6f5; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68614847, jitterRate=0.022440895438194275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:44:36,158 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:44:36,158 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1006): Region open journal for ff027b3be0ce05f3c83052bcab03d6f5: Running coprocessor pre-open hook at 1732754676095Writing region info on filesystem at 1732754676095Initializing all the Stores at 1732754676096 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754676096Cleaning up temporary data from old regions at 1732754676152 (+56 ms)Running coprocessor post-open hooks at 1732754676158 (+6 ms)Region opened successfully at 1732754676158 2024-11-28T00:44:36,159 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5., pid=229, masterSystemTime=1732754676086 2024-11-28T00:44:36,161 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:44:36,161 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:44:36,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=OPEN, openSeqNum=10, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:44:36,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:44:36,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-11-28T00:44:36,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; OpenRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938 in 233 msec 2024-11-28T00:44:36,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, REOPEN/MOVE in 777 msec 2024-11-28T00:44:36,197 DEBUG [master/97f25515df69:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-28T00:44:36,226 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-28T00:44:36,227 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-28T00:44:36,243 DEBUG [master/97f25515df69:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-28T00:44:36,445 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0009_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:44:37,286 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:44:39,976 INFO [regionserver/97f25515df69:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. because 47c99a40beabefa004915b0134843184/l has an old edit so flush to free WALs after random delay 142863 ms Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:44:42,201 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000003/launch_container.sh] 2024-11-28T00:44:42,201 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000003/container_tokens] 2024-11-28T00:44:42,201 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000003/sysfs] 2024-11-28T00:44:43,556 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:52518 2024-11-28T00:44:43,570 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:54440 2024-11-28T00:44:44,850 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000004/launch_container.sh] 2024-11-28T00:44:44,851 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000004/container_tokens] 2024-11-28T00:44:44,851 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for 
path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:44:46,569 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:52530 2024-11-28T00:44:46,570 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:54452 2024-11-28T00:44:47,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000005/launch_container.sh] 2024-11-28T00:44:47,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000005/container_tokens] 2024-11-28T00:44:47,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000005/sysfs] 2024-11-28T00:44:49,903 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000002/launch_container.sh] 2024-11-28T00:44:49,904 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000002/container_tokens] 2024-11-28T00:44:49,904 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for 
path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:44:54,248 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000008/launch_container.sh] 2024-11-28T00:44:54,250 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000008/container_tokens] 2024-11-28T00:44:54,251 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000008/sysfs] 2024-11-28T00:44:54,503 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000007/launch_container.sh] 2024-11-28T00:44:54,503 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000007/container_tokens] 2024-11-28T00:44:54,503 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:44:55,606 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:36658 2024-11-28T00:44:55,606 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:36636 2024-11-28T00:44:56,882 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:44:58,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000009/launch_container.sh] 2024-11-28T00:44:58,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000009/container_tokens] 2024-11-28T00:44:58,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000009/sysfs] 2024-11-28T00:44:58,181 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000010/launch_container.sh] 2024-11-28T00:44:58,181 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000010/container_tokens] 2024-11-28T00:44:58,182 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:44:59,644 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:36668 2024-11-28T00:44:59,649 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:36644 2024-11-28T00:45:03,752 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fc518652ec3b7fd6e62c4676cbff2f68, had cached 0 bytes from a total of 15055 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:45:05,584 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000012/launch_container.sh] 2024-11-28T00:45:05,584 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000012/container_tokens] 2024-11-28T00:45:05,584 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000012/sysfs] 2024-11-28T00:45:06,315 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000011/launch_container.sh] 2024-11-28T00:45:06,315 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000011/container_tokens] 2024-11-28T00:45:06,315 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000011/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:45:06,655 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:51028 2024-11-28T00:45:07,663 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:51030 2024-11-28T00:45:09,496 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0009_01_000017 while processing FINISH_CONTAINERS event 2024-11-28T00:45:10,261 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0009_01_000018 while processing FINISH_CONTAINERS event 2024-11-28T00:45:11,169 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000014/launch_container.sh] 2024-11-28T00:45:11,169 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000014/container_tokens] 2024-11-28T00:45:11,170 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000014/sysfs] 2024-11-28T00:45:11,209 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000013/launch_container.sh] 2024-11-28T00:45:11,209 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000013/container_tokens] 2024-11-28T00:45:11,209 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000013/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/local-export-1732754659748/archive/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:45:12,736 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:58550 2024-11-28T00:45:12,738 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:59564 2024-11-28T00:45:16,037 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:58556 2024-11-28T00:45:16,037 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:59568 2024-11-28T00:45:16,037 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:59572 2024-11-28T00:45:16,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742374_1550 (size=49124) 2024-11-28T00:45:16,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742374_1550 (size=49124) 2024-11-28T00:45:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742374_1550 (size=49124) 2024-11-28T00:45:16,269 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0009_01_000016 is : 143 2024-11-28T00:45:16,287 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_1/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000016/sysfs] 2024-11-28T00:45:16,295 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0009_01_000020 is : 143 2024-11-28T00:45:16,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742375_1551 (size=461) 2024-11-28T00:45:16,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742375_1551 (size=461) 2024-11-28T00:45:16,309 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742375_1551 (size=461) 2024-11-28T00:45:16,320 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000020/launch_container.sh] 2024-11-28T00:45:16,320 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000020/container_tokens] 2024-11-28T00:45:16,320 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000020/sysfs] 2024-11-28T00:45:16,322 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000015/launch_container.sh] 2024-11-28T00:45:16,322 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000015/container_tokens] 2024-11-28T00:45:16,322 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000015/sysfs] 2024-11-28T00:45:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742376_1552 (size=49124) 2024-11-28T00:45:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742376_1552 (size=49124) 2024-11-28T00:45:16,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742376_1552 (size=49124) 2024-11-28T00:45:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742377_1553 (size=349823) 2024-11-28T00:45:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742377_1553 (size=349823) 
2024-11-28T00:45:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742377_1553 (size=349823)
2024-11-28T00:45:16,377 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:59576
2024-11-28T00:45:16,500 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0009_01_000019 is : 143
2024-11-28T00:45:16,505 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000019/sysfs]
2024-11-28T00:45:17,486 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732754375741_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
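The export job above failed because ExportMapper.verifyCopyResult compares file checksums across an hdfs:// source and a file:// destination, and the two filesystems do not produce comparable checksums by default; the IOException text itself offers two ways out (file-level composite CRCs, or skipping verification). Below is a minimal, hedged sketch of acting on that suggestion by re-running the ExportSnapshot tool programmatically: the snapshot name and the option/property names are taken from this log, while the local destination path and the wrapper class are assumptions for illustration, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotWithCompositeCrc {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level, block-size-independent checksums so hdfs:// and file:// results
    // can be compared, as the IOException message recommends.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum", // snapshot name from this log
        "-copy-to", "file:///tmp/local-export",        // hypothetical local destination
        // "-no-checksum-verify",                      // alternative from the message: skip verification
    });
    System.exit(rc);
  }
}

The -no-checksum-verify alternative is the one the log message warns about: with verification skipped, corruption introduced during the copy would go undetected.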
2024-11-28T00:45:17,488 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488 2024-11-28T00:45:17,488 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:17,536 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:17,536 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:45:17,539 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:45:17,561 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:45:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742378_1554 (size=951) 2024-11-28T00:45:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742378_1554 (size=951) 2024-11-28T00:45:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742378_1554 (size=951) 2024-11-28T00:45:17,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742379_1555 (size=156) 2024-11-28T00:45:17,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742379_1555 (size=156) 2024-11-28T00:45:17,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742379_1555 (size=156) 2024-11-28T00:45:17,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:17,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:17,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-11723378432475734386.jar 2024-11-28T00:45:19,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-10888279539529248974.jar 2024-11-28T00:45:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:45:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:45:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:45:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:45:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:45:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:45:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:45:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:45:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:45:19,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:45:19,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:45:19,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:19,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:19,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:19,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:19,134 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:19,134 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:19,134 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:19,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742380_1556 (size=6424743) 2024-11-28T00:45:19,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742380_1556 (size=6424743) 2024-11-28T00:45:19,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742380_1556 (size=6424743) 2024-11-28T00:45:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742381_1557 (size=24020) 2024-11-28T00:45:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742381_1557 (size=24020) 2024-11-28T00:45:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742381_1557 (size=24020) 2024-11-28T00:45:19,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742382_1558 (size=77755) 2024-11-28T00:45:19,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742382_1558 (size=77755) 2024-11-28T00:45:19,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 
is added to blk_1073742382_1558 (size=77755) 2024-11-28T00:45:19,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742383_1559 (size=131360) 2024-11-28T00:45:19,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742383_1559 (size=131360) 2024-11-28T00:45:19,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742383_1559 (size=131360) 2024-11-28T00:45:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742384_1560 (size=111793) 2024-11-28T00:45:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742384_1560 (size=111793) 2024-11-28T00:45:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742384_1560 (size=111793) 2024-11-28T00:45:19,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742385_1561 (size=1832290) 2024-11-28T00:45:19,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742385_1561 (size=1832290) 2024-11-28T00:45:19,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742385_1561 (size=1832290) 2024-11-28T00:45:19,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742386_1562 (size=8360005) 2024-11-28T00:45:19,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742386_1562 (size=8360005) 2024-11-28T00:45:19,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742386_1562 (size=8360005) 2024-11-28T00:45:19,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742387_1563 (size=503880) 2024-11-28T00:45:19,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742387_1563 (size=503880) 2024-11-28T00:45:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742387_1563 (size=503880) 2024-11-28T00:45:19,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742388_1564 (size=322274) 2024-11-28T00:45:19,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742388_1564 (size=322274) 2024-11-28T00:45:19,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742388_1564 (size=322274) 2024-11-28T00:45:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:45:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46245 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:45:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:45:19,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742390_1566 (size=45609) 2024-11-28T00:45:19,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742390_1566 (size=45609) 2024-11-28T00:45:19,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742390_1566 (size=45609) 2024-11-28T00:45:19,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742391_1567 (size=440956) 2024-11-28T00:45:19,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742391_1567 (size=440956) 2024-11-28T00:45:19,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742391_1567 (size=440956) 2024-11-28T00:45:19,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742392_1568 (size=136454) 2024-11-28T00:45:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742392_1568 (size=136454) 2024-11-28T00:45:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742392_1568 (size=136454) 2024-11-28T00:45:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742393_1569 (size=1597136) 2024-11-28T00:45:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742393_1569 (size=1597136) 2024-11-28T00:45:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742393_1569 (size=1597136) 2024-11-28T00:45:19,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742394_1570 (size=30873) 2024-11-28T00:45:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742394_1570 (size=30873) 2024-11-28T00:45:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742394_1570 (size=30873) 2024-11-28T00:45:19,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742395_1571 (size=29229) 2024-11-28T00:45:19,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742395_1571 (size=29229) 2024-11-28T00:45:19,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742395_1571 (size=29229) 2024-11-28T00:45:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46245 is added to blk_1073742396_1572 (size=903861) 2024-11-28T00:45:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742396_1572 (size=903861) 2024-11-28T00:45:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742396_1572 (size=903861) 2024-11-28T00:45:19,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742397_1573 (size=5175431) 2024-11-28T00:45:19,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742397_1573 (size=5175431) 2024-11-28T00:45:19,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742397_1573 (size=5175431) 2024-11-28T00:45:19,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742398_1574 (size=232881) 2024-11-28T00:45:19,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742398_1574 (size=232881) 2024-11-28T00:45:19,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742398_1574 (size=232881) 2024-11-28T00:45:19,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742399_1575 (size=1323991) 2024-11-28T00:45:19,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742399_1575 (size=1323991) 2024-11-28T00:45:19,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742399_1575 (size=1323991) 2024-11-28T00:45:19,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742400_1576 (size=4695811) 2024-11-28T00:45:19,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742400_1576 (size=4695811) 2024-11-28T00:45:19,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742400_1576 (size=4695811) 2024-11-28T00:45:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742401_1577 (size=1877034) 2024-11-28T00:45:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742401_1577 (size=1877034) 2024-11-28T00:45:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742401_1577 (size=1877034) 2024-11-28T00:45:20,153 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-11-28T00:45:20,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742402_1578 (size=217555) 2024-11-28T00:45:20,171 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742402_1578 (size=217555) 2024-11-28T00:45:20,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742402_1578 (size=217555) 2024-11-28T00:45:20,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742403_1579 (size=4188619) 2024-11-28T00:45:20,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742403_1579 (size=4188619) 2024-11-28T00:45:20,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742403_1579 (size=4188619) 2024-11-28T00:45:20,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742404_1580 (size=127628) 2024-11-28T00:45:20,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742404_1580 (size=127628) 2024-11-28T00:45:20,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742404_1580 (size=127628) 2024-11-28T00:45:20,600 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:45:20,603 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-28T00:45:20,604 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:45:20,605 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:45:20,605 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:45:20,605 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:45:20,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742405_1581 (size=1023) 2024-11-28T00:45:20,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742405_1581 (size=1023) 2024-11-28T00:45:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742405_1581 (size=1023) 2024-11-28T00:45:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742406_1582 (size=35) 2024-11-28T00:45:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742406_1582 (size=35) 2024-11-28T00:45:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742406_1582 (size=35) 2024-11-28T00:45:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742407_1583 (size=304077) 2024-11-28T00:45:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742407_1583 (size=304077) 2024-11-28T00:45:20,634 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742407_1583 (size=304077) 2024-11-28T00:45:20,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a074d787aab5a43a48b3164bc3a7256f, had cached 0 bytes from a total of 6086 2024-11-28T00:45:20,801 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region af0adc78f2023f6d163d04caaa3516c7, had cached 0 bytes from a total of 14465 2024-11-28T00:45:21,095 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ff027b3be0ce05f3c83052bcab03d6f5, had cached 0 bytes from a total of 5700 2024-11-28T00:45:22,440 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:45:22,440 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:45:22,443 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0009_000001 (auth:SIMPLE) from 127.0.0.1:51798 2024-11-28T00:45:22,458 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000001/launch_container.sh] 2024-11-28T00:45:22,458 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000001/container_tokens] 2024-11-28T00:45:22,458 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_0/usercache/jenkins/appcache/application_1732754375741_0009/container_1732754375741_0009_01_000001/sysfs] 2024-11-28T00:45:23,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:60580 2024-11-28T00:45:26,882 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:45:28,970 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:50230 2024-11-28T00:45:29,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742408_1584 (size=349775) 2024-11-28T00:45:29,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742408_1584 (size=349775) 2024-11-28T00:45:29,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742408_1584 (size=349775) 2024-11-28T00:45:30,977 WARN [regionserver/97f25515df69:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 0 2024-11-28T00:45:31,058 WARN [regionserver/97f25515df69:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-11-28T00:45:31,391 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:60584 2024-11-28T00:45:31,391 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:51800 2024-11-28T00:45:32,122 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:47770 2024-11-28T00:45:32,155 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:52154 2024-11-28T00:45:32,362 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:45:32,362 DEBUG [master/97f25515df69:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ff027b3be0ce05f3c83052bcab03d6f5 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:45:34,453 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0010_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:45:37,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742409_1585 (size=15055) 2024-11-28T00:45:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742409_1585 (size=15055) 2024-11-28T00:45:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742409_1585 (size=15055) 2024-11-28T00:45:41,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742411_1587 (size=5700) 2024-11-28T00:45:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742411_1587 (size=5700) 2024-11-28T00:45:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742411_1587 (size=5700) 2024-11-28T00:45:41,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45777 is added to blk_1073742412_1588 (size=8241) 2024-11-28T00:45:41,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742412_1588 (size=8241) 2024-11-28T00:45:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742412_1588 (size=8241) 2024-11-28T00:45:41,613 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000004/launch_container.sh] 2024-11-28T00:45:41,613 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000004/container_tokens] 2024-11-28T00:45:41,618 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000004/sysfs] 2024-11-28T00:45:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742413_1589 (size=5032) 2024-11-28T00:45:41,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742413_1589 (size=5032) 2024-11-28T00:45:41,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742413_1589 (size=5032) 2024-11-28T00:45:41,835 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000003/launch_container.sh] 2024-11-28T00:45:41,835 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000003/container_tokens] 2024-11-28T00:45:41,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_3/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000003/sysfs] 2024-11-28T00:45:41,941 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742410_1586 (size=31742) 2024-11-28T00:45:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742410_1586 (size=31742) 2024-11-28T00:45:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742410_1586 (size=31742) 2024-11-28T00:45:41,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742414_1590 (size=463) 2024-11-28T00:45:41,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742414_1590 (size=463) 2024-11-28T00:45:41,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742414_1590 (size=463) 2024-11-28T00:45:42,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742415_1591 (size=31742) 2024-11-28T00:45:42,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742415_1591 (size=31742) 2024-11-28T00:45:42,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742415_1591 (size=31742) 2024-11-28T00:45:42,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742416_1592 (size=349775) 2024-11-28T00:45:42,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742416_1592 (size=349775) 2024-11-28T00:45:42,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742416_1592 (size=349775) 2024-11-28T00:45:42,129 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000005/launch_container.sh] 2024-11-28T00:45:42,129 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000005/container_tokens] 2024-11-28T00:45:42,129 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000005/sysfs] 2024-11-28T00:45:42,151 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:50900 2024-11-28T00:45:42,169 INFO [Socket 
Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:60852 2024-11-28T00:45:42,207 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:60866 2024-11-28T00:45:43,574 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000002/launch_container.sh] 2024-11-28T00:45:43,574 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000002/container_tokens] 2024-11-28T00:45:43,574 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000002/sysfs] 2024-11-28T00:45:43,894 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:45:43,895 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-28T00:45:43,950 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-11-28T00:45:43,950 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:45:43,951 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:45:43,951 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:45:43,952 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-28T00:45:43,952 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-28T00:45:43,952 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:45:43,952 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-28T00:45:43,952 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754717488/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-28T00:45:43,974 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithChecksum 2024-11-28T00:45:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:43,978 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754743978"}]},"ts":"1732754743978"} 2024-11-28T00:45:43,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-28T00:45:43,985 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-28T00:45:43,985 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-28T00:45:43,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-28T00:45:43,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, UNASSIGN}] 2024-11-28T00:45:43,995 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, UNASSIGN 2024-11-28T00:45:43,995 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, UNASSIGN 2024-11-28T00:45:44,009 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:45:44,010 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=fc518652ec3b7fd6e62c4676cbff2f68, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:45:44,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, UNASSIGN because future has completed 2024-11-28T00:45:44,021 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:45:44,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:45:44,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, UNASSIGN because future has completed 2024-11-28T00:45:44,024 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:45:44,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:45:44,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-28T00:45:44,177 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] 
handler.UnassignRegionHandler(122): Close fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:45:44,177 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:45:44,177 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing fc518652ec3b7fd6e62c4676cbff2f68, disabling compactions & flushes 2024-11-28T00:45:44,177 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:45:44,177 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:45:44,177 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. after waiting 0 ms 2024-11-28T00:45:44,177 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:45:44,179 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:45:44,179 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:45:44,179 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing ff027b3be0ce05f3c83052bcab03d6f5, disabling compactions & flushes 2024-11-28T00:45:44,179 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:45:44,180 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 2024-11-28T00:45:44,180 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. after waiting 0 ms 2024-11-28T00:45:44,180 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:45:44,193 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:45:44,194 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:45:44,194 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68. 2024-11-28T00:45:44,194 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for fc518652ec3b7fd6e62c4676cbff2f68: Waiting for close lock at 1732754744177Running coprocessor pre-close hooks at 1732754744177Disabling compacts and flushes for region at 1732754744177Disabling writes for close at 1732754744177Writing region close event to WAL at 1732754744180 (+3 ms)Running coprocessor post-close hooks at 1732754744194 (+14 ms)Closed at 1732754744194 2024-11-28T00:45:44,196 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:45:44,196 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=fc518652ec3b7fd6e62c4676cbff2f68, regionState=CLOSED 2024-11-28T00:45:44,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:45:44,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-11-28T00:45:44,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure fc518652ec3b7fd6e62c4676cbff2f68, server=97f25515df69,34347,1732754369169 in 180 msec 2024-11-28T00:45:44,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=fc518652ec3b7fd6e62c4676cbff2f68, UNASSIGN in 210 msec 2024-11-28T00:45:44,206 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-11-28T00:45:44,208 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:45:44,208 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5. 
2024-11-28T00:45:44,208 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for ff027b3be0ce05f3c83052bcab03d6f5: Waiting for close lock at 1732754744179Running coprocessor pre-close hooks at 1732754744179Disabling compacts and flushes for region at 1732754744179Disabling writes for close at 1732754744180 (+1 ms)Writing region close event to WAL at 1732754744182 (+2 ms)Running coprocessor post-close hooks at 1732754744208 (+26 ms)Closed at 1732754744208 2024-11-28T00:45:44,210 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:45:44,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=ff027b3be0ce05f3c83052bcab03d6f5, regionState=CLOSED 2024-11-28T00:45:44,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:45:44,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-11-28T00:45:44,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure ff027b3be0ce05f3c83052bcab03d6f5, server=97f25515df69,38585,1732754368938 in 192 msec 2024-11-28T00:45:44,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-11-28T00:45:44,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ff027b3be0ce05f3c83052bcab03d6f5, UNASSIGN in 227 msec 2024-11-28T00:45:44,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-11-28T00:45:44,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 238 msec 2024-11-28T00:45:44,228 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754744228"}]},"ts":"1732754744228"} 2024-11-28T00:45:44,230 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-28T00:45:44,231 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-28T00:45:44,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 258 msec 2024-11-28T00:45:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-28T00:45:44,298 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:45:44,298 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithChecksum 2024-11-28T00:45:44,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,300 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-28T00:45:44,301 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,304 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-28T00:45:44,318 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:45:44,318 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:45:44,320 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits] 2024-11-28T00:45:44,326 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/recovered.edits] 2024-11-28T00:45:44,329 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/cf/8f6ae53e4ebb48dc96d22ffe315dcd34 2024-11-28T00:45:44,331 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/cf/9be64de79a1b41b79e1a68311c2a46d2 2024-11-28T00:45:44,332 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68/recovered.edits/9.seqid 2024-11-28T00:45:44,332 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:45:44,334 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits/12.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5/recovered.edits/12.seqid 2024-11-28T00:45:44,334 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportWithChecksum/ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:45:44,334 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-28T00:45:44,335 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-28T00:45:44,335 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-28T00:45:44,338 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128d0ef5ea6fb6842f78d4cc4e8b4269f56_fc518652ec3b7fd6e62c4676cbff2f68 2024-11-28T00:45:44,339 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112837894b54af774ed8a39b4a8722a548f0_ff027b3be0ce05f3c83052bcab03d6f5 2024-11-28T00:45:44,340 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-28T00:45:44,342 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,350 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-28T00:45:44,353 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-28T00:45:44,354 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,354 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-28T00:45:44,354 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754744354"}]},"ts":"9223372036854775807"} 2024-11-28T00:45:44,355 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754744354"}]},"ts":"9223372036854775807"} 2024-11-28T00:45:44,357 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:45:44,357 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ff027b3be0ce05f3c83052bcab03d6f5, NAME => 'testtb-testExportWithChecksum,,1732754658387.ff027b3be0ce05f3c83052bcab03d6f5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fc518652ec3b7fd6e62c4676cbff2f68, NAME => 'testtb-testExportWithChecksum,1,1732754658387.fc518652ec3b7fd6e62c4676cbff2f68.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:45:44,357 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-11-28T00:45:44,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754744357"}]},"ts":"9223372036854775807"} 2024-11-28T00:45:44,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-28T00:45:44,361 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:45:44,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 63 msec 2024-11-28T00:45:44,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,373 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:45:44,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:45:44,375 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:45:44,375 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:45:44,383 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,384 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:44,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:44,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:44,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:45:44,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:44,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-28T00:45:44,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:44,386 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-28T00:45:44,386 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:45:44,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:44,387 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:44,387 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:44,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-28T00:45:44,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-28T00:45:44,396 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-28T00:45:44,398 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-28T00:45:44,505 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=825 (was 812) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9026 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1451683306_1 at /127.0.0.1:47592 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:45978 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RSProcedureDispatcher-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #2 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #2 for 
volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:59998 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #2 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-932764424_22 at /127.0.0.1:53096 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e-prefix:97f25515df69,38585,1732754368938.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_META-regionserver/97f25515df69:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:43921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:47608 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-932764424_22 at /127.0.0.1:41660 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1451683306_1 at /127.0.0.1:59976 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-932764424_22 at /127.0.0.1:60068 [Receiving block BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #2 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-823006435-172.17.0.3-1732754361847:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 155720) 
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=829 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1193 (was 666) - SystemLoadAverage LEAK? -, ProcessCount=32 (was 24) - ProcessCount LEAK? 
-, AvailableMemoryMB=709 (was 2109) 2024-11-28T00:45:44,506 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-11-28T00:45:44,537 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=825, OpenFileDescriptor=829, MaxFileDescriptor=1048576, SystemLoadAverage=1193, ProcessCount=32, AvailableMemoryMB=709 2024-11-28T00:45:44,537 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-11-28T00:45:44,539 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:45:44,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:44,543 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:45:44,543 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-11-28T00:45:44,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-28T00:45:44,545 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:45:44,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742417_1593 (size=454) 2024-11-28T00:45:44,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742417_1593 (size=454) 2024-11-28T00:45:44,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742417_1593 (size=454) 2024-11-28T00:45:44,581 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8a404fe988949d712f277e0c796244ae, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:44,589 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5547ba761c10f33f1cef2584c6d5256e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742419_1595 (size=79) 2024-11-28T00:45:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742419_1595 (size=79) 2024-11-28T00:45:44,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742419_1595 (size=79) 2024-11-28T00:45:44,645 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:45:44,645 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 5547ba761c10f33f1cef2584c6d5256e, disabling compactions & flushes 2024-11-28T00:45:44,645 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:44,645 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:44,645 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. after waiting 0 ms 2024-11-28T00:45:44,645 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 
2024-11-28T00:45:44,645 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:44,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5547ba761c10f33f1cef2584c6d5256e: Waiting for close lock at 1732754744645Disabling compacts and flushes for region at 1732754744645Disabling writes for close at 1732754744645Writing region close event to WAL at 1732754744645Closed at 1732754744645 2024-11-28T00:45:44,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-28T00:45:44,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742418_1594 (size=79) 2024-11-28T00:45:44,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742418_1594 (size=79) 2024-11-28T00:45:44,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742418_1594 (size=79) 2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 8a404fe988949d712f277e0c796244ae, disabling compactions & flushes 2024-11-28T00:45:44,683 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. after waiting 0 ms 2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:44,683 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 
2024-11-28T00:45:44,683 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8a404fe988949d712f277e0c796244ae: Waiting for close lock at 1732754744683Disabling compacts and flushes for region at 1732754744683Disabling writes for close at 1732754744683Writing region close event to WAL at 1732754744683Closed at 1732754744683 2024-11-28T00:45:44,685 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:45:44,685 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732754744685"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754744685"}]},"ts":"1732754744685"} 2024-11-28T00:45:44,685 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732754744685"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732754744685"}]},"ts":"1732754744685"} 2024-11-28T00:45:44,688 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:45:44,690 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:45:44,691 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754744690"}]},"ts":"1732754744690"} 2024-11-28T00:45:44,693 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-28T00:45:44,694 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {97f25515df69=0} racks are {/default-rack=0} 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:45:44,697 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:45:44,697 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:45:44,697 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:45:44,697 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:45:44,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, 
ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, ASSIGN}] 2024-11-28T00:45:44,700 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, ASSIGN 2024-11-28T00:45:44,700 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, ASSIGN 2024-11-28T00:45:44,701 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, ASSIGN; state=OFFLINE, location=97f25515df69,38585,1732754368938; forceNewPlan=false, retain=false 2024-11-28T00:45:44,702 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, ASSIGN; state=OFFLINE, location=97f25515df69,34347,1732754369169; forceNewPlan=false, retain=false 2024-11-28T00:45:44,852 INFO [97f25515df69:43227 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:45:44,853 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=5547ba761c10f33f1cef2584c6d5256e, regionState=OPENING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:45:44,853 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=8a404fe988949d712f277e0c796244ae, regionState=OPENING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:45:44,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, ASSIGN because future has completed 2024-11-28T00:45:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-28T00:45:44,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8a404fe988949d712f277e0c796244ae, server=97f25515df69,38585,1732754368938}] 2024-11-28T00:45:44,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, ASSIGN because future has completed 2024-11-28T00:45:44,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:45:45,024 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:45,025 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => 8a404fe988949d712f277e0c796244ae, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:45:45,025 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. service=AccessControlService 2024-11-28T00:45:45,025 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:45:45,026 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,026 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:45:45,026 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,026 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,028 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:45,028 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => 5547ba761c10f33f1cef2584c6d5256e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:45:45,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. service=AccessControlService 2024-11-28T00:45:45,029 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:45:45,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:45:45,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,029 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,037 INFO [StoreOpener-5547ba761c10f33f1cef2584c6d5256e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,039 INFO [StoreOpener-5547ba761c10f33f1cef2584c6d5256e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5547ba761c10f33f1cef2584c6d5256e columnFamilyName cf 2024-11-28T00:45:45,040 DEBUG [StoreOpener-5547ba761c10f33f1cef2584c6d5256e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:45:45,040 INFO [StoreOpener-5547ba761c10f33f1cef2584c6d5256e-1 {}] regionserver.HStore(327): Store=5547ba761c10f33f1cef2584c6d5256e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:45:45,041 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,043 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,044 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,044 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,044 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,044 INFO [StoreOpener-8a404fe988949d712f277e0c796244ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,046 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1093): writing seq id for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,048 INFO [StoreOpener-8a404fe988949d712f277e0c796244ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a404fe988949d712f277e0c796244ae columnFamilyName cf 2024-11-28T00:45:45,049 DEBUG [StoreOpener-8a404fe988949d712f277e0c796244ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:45:45,050 INFO [StoreOpener-8a404fe988949d712f277e0c796244ae-1 {}] regionserver.HStore(327): Store=8a404fe988949d712f277e0c796244ae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:45:45,050 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,051 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,051 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,052 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for 
8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,052 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,054 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,057 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:45:45,058 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened 5547ba761c10f33f1cef2584c6d5256e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66622219, jitterRate=-0.007251575589179993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:45:45,058 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,059 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for 5547ba761c10f33f1cef2584c6d5256e: Running coprocessor pre-open hook at 1732754745029Writing region info on filesystem at 1732754745030 (+1 ms)Initializing all the Stores at 1732754745033 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754745033Cleaning up temporary data from old regions at 1732754745044 (+11 ms)Running coprocessor post-open hooks at 1732754745058 (+14 ms)Region opened successfully at 1732754745059 (+1 ms) 2024-11-28T00:45:45,060 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e., pid=241, masterSystemTime=1732754745021 2024-11-28T00:45:45,062 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:45,062 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 
2024-11-28T00:45:45,063 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=5547ba761c10f33f1cef2584c6d5256e, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:45:45,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:45:45,072 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:45:45,073 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened 8a404fe988949d712f277e0c796244ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60902194, jitterRate=-0.09248659014701843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:45:45,073 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,073 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for 8a404fe988949d712f277e0c796244ae: Running coprocessor pre-open hook at 1732754745026Writing region info on filesystem at 1732754745026Initializing all the Stores at 1732754745034 (+8 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732754745034Cleaning up temporary data from old regions at 1732754745052 (+18 ms)Running coprocessor post-open hooks at 1732754745073 (+21 ms)Region opened successfully at 1732754745073 2024-11-28T00:45:45,077 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae., pid=240, masterSystemTime=1732754745020 2024-11-28T00:45:45,079 DEBUG [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:45,079 INFO [RS_OPEN_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 
2024-11-28T00:45:45,080 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=8a404fe988949d712f277e0c796244ae, regionState=OPEN, openSeqNum=2, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:45:45,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-11-28T00:45:45,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169 in 210 msec 2024-11-28T00:45:45,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8a404fe988949d712f277e0c796244ae, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:45:45,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, ASSIGN in 383 msec 2024-11-28T00:45:45,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=238 2024-11-28T00:45:45,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure 8a404fe988949d712f277e0c796244ae, server=97f25515df69,38585,1732754368938 in 223 msec 2024-11-28T00:45:45,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=237 2024-11-28T00:45:45,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, ASSIGN in 391 msec 2024-11-28T00:45:45,094 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:45:45,094 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754745094"}]},"ts":"1732754745094"} 2024-11-28T00:45:45,097 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-28T00:45:45,099 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:45:45,099 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-28T00:45:45,105 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-28T00:45:45,125 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:45,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:45,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:45,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:45:45,137 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,137 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:45:45,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 598 msec 2024-11-28T00:45:45,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-28T00:45:45,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:45:45,168 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:45,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43795 {}] ipc.CallRunner(138): callId: 530 service: ClientService methodName: Scan size: 210 connection: 172.17.0.3:36336 deadline: 1732754805168, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:45:45,171 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:45:45,171 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:45:45,171 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=97f25515df69,43795,1732754369084, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=97f25515df69 port=38585 startCode=1732754368938. As of locationSeqNum=211. 2024-11-28T00:45:45,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 
2024-11-28T00:45:45,281 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:45:45,285 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:45,292 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:45,300 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:45,304 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:45:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754745304 (current time:1732754745304). 2024-11-28T00:45:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:45:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-28T00:45:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:45:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15fa8cc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:45:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:45:45,306 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:45:45,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:45:45,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:45:45,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da629de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:45:45,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:45:45,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,310 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:45:45,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71c0e7cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:45:45,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=-1] 2024-11-28T00:45:45,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:45,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:45,322 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:45:45,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:45:45,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,323 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:45:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@419499ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:45:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:45:45,334 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:45:45,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:45:45,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:45:45,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aaf10d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:45:45,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:45:45,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,337 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60562, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:45:45,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4685a041, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:45,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:45:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=-1] 2024-11-28T00:45:45,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:45,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:45,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:45:45,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:45,347 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54280, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:45,348 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:45:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:45:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:45,348 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:45:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-28T00:45:45,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:45:45,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:45:45,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-28T00:45:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:45:45,352 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:45:45,354 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:45:45,357 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:45:45,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742420_1596 (size=203) 2024-11-28T00:45:45,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742420_1596 (size=203) 2024-11-28T00:45:45,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742420_1596 (size=203) 2024-11-28T00:45:45,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:45:45,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e}] 2024-11-28T00:45:45,387 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,387 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:45:45,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-11-28T00:45:45,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 5547ba761c10f33f1cef2584c6d5256e: 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 8a404fe988949d712f277e0c796244ae: 2024-11-28T00:45:45,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:45:45,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:45:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742421_1597 (size=82) 2024-11-28T00:45:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742421_1597 (size=82) 2024-11-28T00:45:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742421_1597 (size=82) 2024-11-28T00:45:45,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:45,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-11-28T00:45:45,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-11-28T00:45:45,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,596 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:45,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae in 215 msec 2024-11-28T00:45:45,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742422_1598 (size=82) 2024-11-28T00:45:45,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742422_1598 (size=82) 2024-11-28T00:45:45,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742422_1598 (size=82) 2024-11-28T00:45:45,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 
2024-11-28T00:45:45,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-11-28T00:45:45,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-11-28T00:45:45,611 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:45,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-11-28T00:45:45,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e in 228 msec 2024-11-28T00:45:45,616 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:45:45,617 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:45:45,622 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:45:45,622 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:45:45,622 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:45:45,623 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:45:45,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742423_1599 (size=74) 2024-11-28T00:45:45,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742423_1599 (size=74) 2024-11-28T00:45:45,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742423_1599 (size=74) 2024-11-28T00:45:45,653 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:45:45,653 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,654 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:45:45,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742424_1600 (size=697) 2024-11-28T00:45:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742424_1600 (size=697) 2024-11-28T00:45:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742424_1600 (size=697) 2024-11-28T00:45:45,718 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:45:45,727 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:45:45,727 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,730 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:45:45,730 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-28T00:45:45,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 380 msec 2024-11-28T00:45:45,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:45:45,978 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:45:45,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38585 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:45:45,988 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:45:45,990 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:45,994 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:45,994 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 
2024-11-28T00:45:45,994 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:45:45,997 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:46,004 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:46,012 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:45:46,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:45:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732754746016 (current time:1732754746016). 2024-11-28T00:45:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:45:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-28T00:45:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:45:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a2927c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:45:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:45:46,018 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:45:46,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:45:46,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:45:46,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6263a748, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:45:46,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:45:46,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:46,020 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:45:46,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be3d17a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:45:46,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=-1] 2024-11-28T00:45:46,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:46,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57628, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:46,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
2024-11-28T00:45:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:45:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:46,025 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:45:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c1344d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ClusterIdFetcher(90): Going to request 97f25515df69,43227,-1 for getting cluster id 2024-11-28T00:45:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:45:46,029 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8ae27c25-4edf-4df0-ab8c-1da49524c9af' 2024-11-28T00:45:46,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:45:46,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8ae27c25-4edf-4df0-ab8c-1da49524c9af" 2024-11-28T00:45:46,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c1675bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [97f25515df69,43227,-1] 2024-11-28T00:45:46,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:45:46,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:45:46,031 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:45:46,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b4649b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:45:46,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:45:46,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=97f25515df69,38585,1732754368938, seqNum=-1] 2024-11-28T00:45:46,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:46,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57636, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:46,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., hostname=97f25515df69,43795,1732754369084, seqNum=2] 2024-11-28T00:45:46,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:45:46,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54294, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:45:46,040 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227. 
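
The ClusterIdFetcher, ConnectionRegistry and meta/acl location entries above are the internals of an HBase client building a connection and resolving region locations. A minimal client-side sketch of the same lookup through the public API follows; it is illustrative only (the class name LocateRegionSketch and the probe row are invented, and the test drives this through its own utilities):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;
  import org.apache.hadoop.hbase.util.Bytes;

  public class LocateRegionSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           RegionLocator locator = conn.getRegionLocator(
               TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
        // Resolve which region/server hosts a given row; this is the client-side
        // view of the AsyncNonMetaRegionLocator lookups logged above.
        HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
        System.out.println(loc);
      }
    }
  }
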
2024-11-28T00:45:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor263.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-28T00:45:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:45:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T00:45:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]
2024-11-28T00:45:46,041 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-28T00:45:46,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
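
The ACL read and the "No existing snapshot, attempting snapshot..." entry above are the master validating a snapshot request; the SnapshotProcedure registered next (pid=245) is what executes it. For reference, a minimal sketch of how such a FLUSH snapshot is typically requested through the Admin API (hypothetical driver code; the test harness issues the equivalent call through its own helpers):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.SnapshotType;

  public class RequestSnapshotSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // FLUSH-type snapshot: memstores are flushed first, then the resulting
        // HFiles are referenced in the snapshot manifest (see the states below).
        admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
            TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
            SnapshotType.FLUSH);
      }
    }
  }
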
2024-11-28T00:45:46,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:45:46,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-28T00:45:46,044 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:45:46,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:45:46,050 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:45:46,052 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:45:46,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742425_1601 (size=198) 2024-11-28T00:45:46,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742425_1601 (size=198) 2024-11-28T00:45:46,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742425_1601 (size=198) 2024-11-28T00:45:46,068 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:45:46,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e}] 2024-11-28T00:45:46,070 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:46,071 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:45:46,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38585 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-11-28T00:45:46,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:46,223 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing 8a404fe988949d712f277e0c796244ae 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:45:46,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-11-28T00:45:46,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:46,224 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 5547ba761c10f33f1cef2584c6d5256e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:45:46,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e is 71, key is 10866b6872b06f6aac4fdb3478ea7b1f/cf:q/1732754745988/Put/seqid=0 2024-11-28T00:45:46,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae is 71, key is 02822e1928ef5de53ad0708e35bbbf43/cf:q/1732754745986/Put/seqid=0 2024-11-28T00:45:46,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742426_1602 (size=8171) 2024-11-28T00:45:46,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742426_1602 (size=8171) 2024-11-28T00:45:46,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742426_1602 (size=8171) 2024-11-28T00:45:46,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
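
The flushes above each write two files: a MOB file under mobdir/ and a regular store file under the region's cf/ directory, which is the on-disk layout of a MOB-enabled column family. The table descriptor itself is not shown in this excerpt; a hypothetical sketch of a descriptor that would produce this layout (the threshold value is illustrative, not taken from the test):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MobTableDescriptorSketch {
    public static void main(String[] args) {
      // Cells larger than the MOB threshold are written to separate MOB files
      // under /mobdir, while the regular store file keeps small cells plus
      // references to the MOB files.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(100L) // bytes; illustrative value
              .build())
          .build();
      System.out.println(td);
    }
  }
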
2024-11-28T00:45:46,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742427_1603 (size=5102) 2024-11-28T00:45:46,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742427_1603 (size=5102) 2024-11-28T00:45:46,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742427_1603 (size=5102) 2024-11-28T00:45:46,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:45:46,265 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:46,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/.tmp/cf/f8aadea2633e4c55982353b3b27d63c0, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=5547ba761c10f33f1cef2584c6d5256e] 2024-11-28T00:45:46,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/.tmp/cf/f8aadea2633e4c55982353b3b27d63c0 is 220, key is 1812aba00b39bd469fbcc78f1184629c2/cf:q/1732754745988/Put/seqid=0 2024-11-28T00:45:46,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:46,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/.tmp/cf/54b9ff45c9ee4825a489383a5d3dd806, 
store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=8a404fe988949d712f277e0c796244ae] 2024-11-28T00:45:46,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/.tmp/cf/54b9ff45c9ee4825a489383a5d3dd806 is 220, key is 0c41aa6a03863694c7ae9f7a73bfda921/cf:q/1732754745986/Put/seqid=0 2024-11-28T00:45:46,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742428_1604 (size=15525) 2024-11-28T00:45:46,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742428_1604 (size=15525) 2024-11-28T00:45:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742428_1604 (size=15525) 2024-11-28T00:45:46,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/.tmp/cf/f8aadea2633e4c55982353b3b27d63c0 2024-11-28T00:45:46,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742429_1605 (size=5962) 2024-11-28T00:45:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742429_1605 (size=5962) 2024-11-28T00:45:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742429_1605 (size=5962) 2024-11-28T00:45:46,276 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/.tmp/cf/54b9ff45c9ee4825a489383a5d3dd806 2024-11-28T00:45:46,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/.tmp/cf/f8aadea2633e4c55982353b3b27d63c0 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0 2024-11-28T00:45:46,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/.tmp/cf/54b9ff45c9ee4825a489383a5d3dd806 as 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806 2024-11-28T00:45:46,283 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0, entries=47, sequenceid=6, filesize=15.2 K 2024-11-28T00:45:46,284 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:45:46,284 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5547ba761c10f33f1cef2584c6d5256e in 60ms, sequenceid=6, compaction requested=false 2024-11-28T00:45:46,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-28T00:45:46,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 8a404fe988949d712f277e0c796244ae in 62ms, sequenceid=6, compaction requested=false 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush status journal for 8a404fe988949d712f277e0c796244ae: 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 5547ba761c10f33f1cef2584c6d5256e: 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
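
Both region snapshot callables have completed at this point; the manifest consolidation and verification steps below make the snapshot visible to clients. A minimal sketch of confirming that from the client side (hypothetical code, not part of the test):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class ListSnapshotsSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Prints every completed snapshot; snaptb0-testExportFileSystemStateWithSkipTmp
        // should appear once pid=245 reaches SNAPSHOT_COMPLETE_SNAPSHOT below.
        admin.listSnapshots().forEach(System.out::println);
      }
    }
  }
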
2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806] hfiles 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0] hfiles 2024-11-28T00:45:46,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742431_1607 (size=121) 2024-11-28T00:45:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742431_1607 (size=121) 2024-11-28T00:45:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742430_1606 (size=121) 2024-11-28T00:45:46,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:45:46,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-11-28T00:45:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742431_1607 (size=121) 2024-11-28T00:45:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742430_1606 (size=121) 2024-11-28T00:45:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742430_1606 (size=121) 2024-11-28T00:45:46,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:45:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-11-28T00:45:46,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/97f25515df69:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-11-28T00:45:46,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:46,293 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:46,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-11-28T00:45:46,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:46,293 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:46,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5547ba761c10f33f1cef2584c6d5256e in 225 msec 2024-11-28T00:45:46,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-11-28T00:45:46,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8a404fe988949d712f277e0c796244ae in 225 msec 2024-11-28T00:45:46,295 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:45:46,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:45:46,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-28T00:45:46,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:45:46,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:45:46,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae] hfiles 2024-11-28T00:45:46,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:45:46,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae 2024-11-28T00:45:46,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742432_1608 (size=305) 2024-11-28T00:45:46,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742432_1608 (size=305) 2024-11-28T00:45:46,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742432_1608 (size=305) 2024-11-28T00:45:46,306 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:45:46,306 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,306 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742433_1609 (size=1007) 2024-11-28T00:45:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742433_1609 (size=1007) 2024-11-28T00:45:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742433_1609 (size=1007) 2024-11-28T00:45:46,321 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:45:46,327 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:45:46,327 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,329 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:45:46,329 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-28T00:45:46,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 287 msec 2024-11-28T00:45:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:45:46,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:45:46,358 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358 2024-11-28T00:45:46,359 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): 
tgtFsUri=hdfs://localhost:44423, tgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358, rawTgtDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358, srcFsUri=hdfs://localhost:44423, srcDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:46,386 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44423, inputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e 2024-11-28T00:45:46,386 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,387 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:45:46,391 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:46,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742435_1611 (size=198) 2024-11-28T00:45:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742435_1611 (size=198) 2024-11-28T00:45:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742434_1610 (size=1007) 2024-11-28T00:45:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742435_1611 (size=198) 2024-11-28T00:45:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742434_1610 (size=1007) 2024-11-28T00:45:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742434_1610 (size=1007) 2024-11-28T00:45:46,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:46,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:46,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-12522599082469704419.jar 2024-11-28T00:45:47,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,410 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop-16387014502342632168.jar 2024-11-28T00:45:47,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-28T00:45:47,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:45:47,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:45:47,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:45:47,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:45:47,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:45:47,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:45:47,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:45:47,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:45:47,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:45:47,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:45:47,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:45:47,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:47,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:47,417 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:47,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:47,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:45:47,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:47,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:45:47,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742436_1612 (size=24020) 2024-11-28T00:45:47,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742436_1612 (size=24020) 2024-11-28T00:45:47,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742436_1612 (size=24020) 2024-11-28T00:45:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742437_1613 (size=77755) 2024-11-28T00:45:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742437_1613 (size=77755) 2024-11-28T00:45:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742437_1613 (size=77755) 2024-11-28T00:45:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742438_1614 (size=131360) 2024-11-28T00:45:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742438_1614 (size=131360) 2024-11-28T00:45:47,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742438_1614 (size=131360) 2024-11-28T00:45:47,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742439_1615 (size=111793) 2024-11-28T00:45:47,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742439_1615 (size=111793) 2024-11-28T00:45:47,493 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742439_1615 (size=111793) 2024-11-28T00:45:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742440_1616 (size=1832290) 2024-11-28T00:45:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742440_1616 (size=1832290) 2024-11-28T00:45:47,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742440_1616 (size=1832290) 2024-11-28T00:45:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742441_1617 (size=8360005) 2024-11-28T00:45:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742441_1617 (size=8360005) 2024-11-28T00:45:47,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742441_1617 (size=8360005) 2024-11-28T00:45:47,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742442_1618 (size=503880) 2024-11-28T00:45:47,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742442_1618 (size=503880) 2024-11-28T00:45:47,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742442_1618 (size=503880) 2024-11-28T00:45:47,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742443_1619 (size=322274) 2024-11-28T00:45:47,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742443_1619 (size=322274) 2024-11-28T00:45:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742443_1619 (size=322274) 2024-11-28T00:45:47,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742444_1620 (size=20406) 2024-11-28T00:45:47,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742444_1620 (size=20406) 2024-11-28T00:45:47,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742444_1620 (size=20406) 2024-11-28T00:45:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742445_1621 (size=45609) 2024-11-28T00:45:47,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742445_1621 (size=45609) 2024-11-28T00:45:47,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742445_1621 (size=45609) 2024-11-28T00:45:48,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742446_1622 (size=136454) 2024-11-28T00:45:48,007 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742446_1622 (size=136454) 2024-11-28T00:45:48,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742446_1622 (size=136454) 2024-11-28T00:45:48,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742447_1623 (size=440956) 2024-11-28T00:45:48,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742447_1623 (size=440956) 2024-11-28T00:45:48,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742447_1623 (size=440956) 2024-11-28T00:45:48,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742448_1624 (size=1597136) 2024-11-28T00:45:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742448_1624 (size=1597136) 2024-11-28T00:45:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742448_1624 (size=1597136) 2024-11-28T00:45:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742449_1625 (size=30873) 2024-11-28T00:45:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742449_1625 (size=30873) 2024-11-28T00:45:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742449_1625 (size=30873) 2024-11-28T00:45:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742450_1626 (size=29229) 2024-11-28T00:45:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742450_1626 (size=29229) 2024-11-28T00:45:48,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742450_1626 (size=29229) 2024-11-28T00:45:48,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742451_1627 (size=903861) 2024-11-28T00:45:48,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742451_1627 (size=903861) 2024-11-28T00:45:48,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742451_1627 (size=903861) 2024-11-28T00:45:48,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742452_1628 (size=6424743) 2024-11-28T00:45:48,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742452_1628 (size=6424743) 2024-11-28T00:45:48,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742452_1628 (size=6424743) 2024-11-28T00:45:48,223 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742453_1629 (size=5175431) 2024-11-28T00:45:48,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742453_1629 (size=5175431) 2024-11-28T00:45:48,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742453_1629 (size=5175431) 2024-11-28T00:45:48,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742454_1630 (size=232881) 2024-11-28T00:45:48,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742454_1630 (size=232881) 2024-11-28T00:45:48,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742454_1630 (size=232881) 2024-11-28T00:45:48,327 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0010_000001 (auth:SIMPLE) from 127.0.0.1:50916 2024-11-28T00:45:48,346 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000001/launch_container.sh] 2024-11-28T00:45:48,346 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000001/container_tokens] 2024-11-28T00:45:48,346 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0010/container_1732754375741_0010_01_000001/sysfs] 2024-11-28T00:45:48,441 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:45:48,442 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-28T00:45:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-28T00:45:48,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742455_1631 (size=1323991) 2024-11-28T00:45:48,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742455_1631 
(size=1323991) 2024-11-28T00:45:48,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742455_1631 (size=1323991) 2024-11-28T00:45:48,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742456_1632 (size=4695811) 2024-11-28T00:45:48,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742456_1632 (size=4695811) 2024-11-28T00:45:48,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742456_1632 (size=4695811) 2024-11-28T00:45:48,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742457_1633 (size=1877034) 2024-11-28T00:45:48,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742457_1633 (size=1877034) 2024-11-28T00:45:48,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742457_1633 (size=1877034) 2024-11-28T00:45:48,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742458_1634 (size=217555) 2024-11-28T00:45:48,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742458_1634 (size=217555) 2024-11-28T00:45:48,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742458_1634 (size=217555) 2024-11-28T00:45:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742459_1635 (size=4188619) 2024-11-28T00:45:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742459_1635 (size=4188619) 2024-11-28T00:45:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742459_1635 (size=4188619) 2024-11-28T00:45:48,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742460_1636 (size=127628) 2024-11-28T00:45:48,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742460_1636 (size=127628) 2024-11-28T00:45:48,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742460_1636 (size=127628) 2024-11-28T00:45:48,764 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-28T00:45:48,766 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list
2024-11-28T00:45:48,767 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.2 K
2024-11-28T00:45:48,767 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K
2024-11-28T00:45:48,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K
2024-11-28T00:45:48,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K
2024-11-28T00:45:48,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742461_1637 (size=1079)
2024-11-28T00:45:48,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742461_1637 (size=1079)
2024-11-28T00:45:48,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742461_1637 (size=1079)
2024-11-28T00:45:48,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742462_1638 (size=35)
2024-11-28T00:45:48,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742462_1638 (size=35)
2024-11-28T00:45:48,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742462_1638 (size=35)
2024-11-28T00:45:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742463_1639 (size=304251)
2024-11-28T00:45:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742463_1639 (size=304251)
2024-11-28T00:45:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742463_1639 (size=304251)
2024-11-28T00:45:48,817 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-11-28T00:45:48,817 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low.
skipping enforcement to allow at least one application to start 2024-11-28T00:45:49,116 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:60880 2024-11-28T00:45:49,725 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:45:56,140 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:42046 2024-11-28T00:45:56,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742464_1640 (size=349973) 2024-11-28T00:45:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742464_1640 (size=349973) 2024-11-28T00:45:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742464_1640 (size=349973) 2024-11-28T00:45:56,882 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:45:58,442 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:44000 2024-11-28T00:45:58,446 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:45560 2024-11-28T00:45:59,252 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:44016 2024-11-28T00:45:59,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:45570 2024-11-28T00:46:01,320 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732754375741_0011_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:46:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742465_1641 (size=8171) 2024-11-28T00:46:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742465_1641 (size=8171) 2024-11-28T00:46:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742465_1641 (size=8171) 2024-11-28T00:46:05,140 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000003/launch_container.sh] 2024-11-28T00:46:05,140 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000003/container_tokens] 2024-11-28T00:46:05,140 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000003/sysfs] 2024-11-28T00:46:05,153 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-11-28T00:46:05,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a074d787aab5a43a48b3164bc3a7256f, had cached 0 bytes from a total of 6086 2024-11-28T00:46:05,801 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region af0adc78f2023f6d163d04caaa3516c7, had cached 0 bytes from a total of 14465 2024-11-28T00:46:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742467_1643 (size=5962) 2024-11-28T00:46:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742467_1643 (size=5962) 2024-11-28T00:46:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742467_1643 (size=5962) 2024-11-28T00:46:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742468_1644 (size=5102) 2024-11-28T00:46:07,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742468_1644 (size=5102) 2024-11-28T00:46:07,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742468_1644 (size=5102) 2024-11-28T00:46:07,183 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000005/launch_container.sh] 2024-11-28T00:46:07,184 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000005/container_tokens] 2024-11-28T00:46:07,184 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_2/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000005/sysfs] 2024-11-28T00:46:07,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742469_1645 (size=15525) 2024-11-28T00:46:07,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742469_1645 (size=15525) 2024-11-28T00:46:07,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742469_1645 (size=15525) 2024-11-28T00:46:07,288 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000004/launch_container.sh] 2024-11-28T00:46:07,288 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000004/container_tokens] 2024-11-28T00:46:07,288 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_0/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000004/sysfs] 2024-11-28T00:46:07,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742466_1642 (size=31804) 2024-11-28T00:46:07,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742466_1642 (size=31804) 2024-11-28T00:46:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742466_1642 (size=31804) 2024-11-28T00:46:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742470_1646 (size=477) 2024-11-28T00:46:07,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742470_1646 (size=477) 2024-11-28T00:46:07,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742470_1646 (size=477) 2024-11-28T00:46:07,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742471_1647 (size=31804) 2024-11-28T00:46:07,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742471_1647 (size=31804) 2024-11-28T00:46:07,416 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742471_1647 (size=31804) 2024-11-28T00:46:07,432 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000002/launch_container.sh] 2024-11-28T00:46:07,432 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000002/container_tokens] 2024-11-28T00:46:07,432 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-0_3/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000002/sysfs] 2024-11-28T00:46:07,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742472_1648 (size=349973) 2024-11-28T00:46:07,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742472_1648 (size=349973) 2024-11-28T00:46:07,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742472_1648 (size=349973) 2024-11-28T00:46:07,464 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732754375741_0011_000001 (auth:SIMPLE) from 127.0.0.1:40580 2024-11-28T00:46:09,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:46:09,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-28T00:46:09,060 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp
2024-11-28T00:46:09,060 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot
2024-11-28T00:46:09,060 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state
2024-11-28T00:46:09,060 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-11-28T00:46:09,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo
2024-11-28T00:46:09,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest
2024-11-28T00:46:09,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_151319224_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-11-28T00:46:09,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo
2024-11-28T00:46:09,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/export-test/export-1732754746358/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest
2024-11-28T00:46:09,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSkipTmp
2024-11-28T00:46:09,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-11-28T00:46:09,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248
2024-11-28T00:46:09,088 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754769087"}]},"ts":"1732754769087"}
2024-11-28T00:46:09,091 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta
2024-11-28T00:46:09,091 INFO
[PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-28T00:46:09,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-28T00:46:09,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, UNASSIGN}] 2024-11-28T00:46:09,096 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, UNASSIGN 2024-11-28T00:46:09,096 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, UNASSIGN 2024-11-28T00:46:09,098 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=5547ba761c10f33f1cef2584c6d5256e, regionState=CLOSING, regionLocation=97f25515df69,34347,1732754369169 2024-11-28T00:46:09,099 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=8a404fe988949d712f277e0c796244ae, regionState=CLOSING, regionLocation=97f25515df69,38585,1732754368938 2024-11-28T00:46:09,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, UNASSIGN because future has completed 2024-11-28T00:46:09,102 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:46:09,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169}] 2024-11-28T00:46:09,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, UNASSIGN because future has completed 2024-11-28T00:46:09,103 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:46:09,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8a404fe988949d712f277e0c796244ae, 
server=97f25515df69,38585,1732754368938}] 2024-11-28T00:46:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-28T00:46:09,256 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:46:09,256 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:46:09,256 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing 5547ba761c10f33f1cef2584c6d5256e, disabling compactions & flushes 2024-11-28T00:46:09,256 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:46:09,256 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:46:09,256 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. after waiting 0 ms 2024-11-28T00:46:09,256 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:46:09,257 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close 8a404fe988949d712f277e0c796244ae 2024-11-28T00:46:09,258 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:46:09,258 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing 8a404fe988949d712f277e0c796244ae, disabling compactions & flushes 2024-11-28T00:46:09,258 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:46:09,258 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:46:09,258 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 
after waiting 0 ms 2024-11-28T00:46:09,258 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:46:09,278 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:46:09,279 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:09,279 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e. 2024-11-28T00:46:09,279 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for 5547ba761c10f33f1cef2584c6d5256e: Waiting for close lock at 1732754769256Running coprocessor pre-close hooks at 1732754769256Disabling compacts and flushes for region at 1732754769256Disabling writes for close at 1732754769256Writing region close event to WAL at 1732754769257 (+1 ms)Running coprocessor post-close hooks at 1732754769279 (+22 ms)Closed at 1732754769279 2024-11-28T00:46:09,282 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed 5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:46:09,282 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=5547ba761c10f33f1cef2584c6d5256e, regionState=CLOSED 2024-11-28T00:46:09,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169 because future has completed 2024-11-28T00:46:09,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=251 2024-11-28T00:46:09,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 5547ba761c10f33f1cef2584c6d5256e, server=97f25515df69,34347,1732754369169 in 185 msec 2024-11-28T00:46:09,293 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:46:09,294 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:09,294 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae. 2024-11-28T00:46:09,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5547ba761c10f33f1cef2584c6d5256e, UNASSIGN in 198 msec 2024-11-28T00:46:09,294 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for 8a404fe988949d712f277e0c796244ae: Waiting for close lock at 1732754769258Running coprocessor pre-close hooks at 1732754769258Disabling compacts and flushes for region at 1732754769258Disabling writes for close at 1732754769258Writing region close event to WAL at 1732754769288 (+30 ms)Running coprocessor post-close hooks at 1732754769294 (+6 ms)Closed at 1732754769294 2024-11-28T00:46:09,297 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=8a404fe988949d712f277e0c796244ae, regionState=CLOSED 2024-11-28T00:46:09,299 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed 8a404fe988949d712f277e0c796244ae 2024-11-28T00:46:09,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8a404fe988949d712f277e0c796244ae, server=97f25515df69,38585,1732754368938 because future has completed 2024-11-28T00:46:09,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=250 2024-11-28T00:46:09,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure 8a404fe988949d712f277e0c796244ae, server=97f25515df69,38585,1732754368938 in 199 msec 2024-11-28T00:46:09,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=249 2024-11-28T00:46:09,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8a404fe988949d712f277e0c796244ae, UNASSIGN in 211 msec 2024-11-28T00:46:09,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-11-28T00:46:09,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 218 msec 2024-11-28T00:46:09,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732754769326"}]},"ts":"1732754769326"} 2024-11-28T00:46:09,328 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-28T00:46:09,328 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-28T00:46:09,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 251 msec 
2024-11-28T00:46:09,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-28T00:46:09,408 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:46:09,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,414 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,416 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,425 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43795 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,432 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae 2024-11-28T00:46:09,434 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/recovered.edits] 2024-11-28T00:46:09,439 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/cf/54b9ff45c9ee4825a489383a5d3dd806 2024-11-28T00:46:09,441 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:46:09,442 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae/recovered.edits/9.seqid 2024-11-28T00:46:09,443 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/8a404fe988949d712f277e0c796244ae 2024-11-28T00:46:09,444 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf, FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/recovered.edits] 2024-11-28T00:46:09,448 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0 to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/cf/f8aadea2633e4c55982353b3b27d63c0 2024-11-28T00:46:09,451 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/recovered.edits/9.seqid to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e/recovered.edits/9.seqid 2024-11-28T00:46:09,452 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testtb-testExportFileSystemStateWithSkipTmp/5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:46:09,452 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-28T00:46:09,453 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-28T00:46:09,453 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-28T00:46:09,459 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e to 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112867bf87b86010439d8a0203de799d614c_5547ba761c10f33f1cef2584c6d5256e 2024-11-28T00:46:09,461 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae to hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411280e19b3c795e94c1ca5d0a92634702fe6_8a404fe988949d712f277e0c796244ae 2024-11-28T00:46:09,461 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-28T00:46:09,464 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,470 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-28T00:46:09,473 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-28T00:46:09,475 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,475 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-11-28T00:46:09,476 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754769475"}]},"ts":"9223372036854775807"} 2024-11-28T00:46:09,476 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732754769475"}]},"ts":"9223372036854775807"} 2024-11-28T00:46:09,478 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:46:09,478 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8a404fe988949d712f277e0c796244ae, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732754744538.8a404fe988949d712f277e0c796244ae.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5547ba761c10f33f1cef2584c6d5256e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:46:09,478 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-28T00:46:09,479 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732754769478"}]},"ts":"9223372036854775807"} 2024-11-28T00:46:09,482 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-28T00:46:09,483 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 74 msec 2024-11-28T00:46:09,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,514 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:46:09,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:46:09,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:46:09,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,529 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,529 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:46:09,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-11-28T00:46:09,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:46:09,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 
\x04 2024-11-28T00:46:09,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:46:09,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:46:09,531 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,531 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:46:09,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-28T00:46:09,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-28T00:46:09,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:09,569 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=838 (was 825) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:40874 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-360795890_1 at /127.0.0.1:35636 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_151319224_22 at /127.0.0.1:35662 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46607 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:46607 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-360795890_1 at /127.0.0.1:40856 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9888 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 159027) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=833 (was 829) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1068 (was 1193), ProcessCount=35 (was 32) - ProcessCount LEAK? -, AvailableMemoryMB=4863 (was 709) - AvailableMemoryMB LEAK? 
- 2024-11-28T00:46:09,569 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=838 is superior to 500 2024-11-28T00:46:09,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-11-28T00:46:09,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@485fbdad{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:46:09,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6dc79a2c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:46:09,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:46:09,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78cbf550{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:46:09,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e5c03a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED} 2024-11-28T00:46:09,603 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732754375741_0011_01_000001 is : 143 2024-11-28T00:46:09,620 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000001/launch_container.sh] 2024-11-28T00:46:09,620 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000001/container_tokens] 2024-11-28T00:46:09,620 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1960488903/yarn-6423671845/MiniMRCluster_1960488903-localDir-nm-1_1/usercache/jenkins/appcache/application_1732754375741_0011/container_1732754375741_0011_01_000001/sysfs] 2024-11-28T00:46:14,808 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:46:18,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:46:23,946 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:46:26,601 INFO [Time-limited test {}] 
handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b49ff65{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:46:26,602 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24a77e77{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:46:26,602 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:46:26,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44dd1d2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:46:26,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15a2daaa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED} 2024-11-28T00:46:26,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:46:43,618 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:46:43,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26bd67ae{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-28T00:46:43,619 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f470f8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:46:43,619 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:46:43,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32f7927f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:46:43,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8c8026f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED} 2024-11-28T00:46:43,622 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-28T00:46:43,627 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-28T00:46:43,627 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-28T00:46:43,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741830_1006 (size=1185894) 2024-11-28T00:46:43,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741830_1006 (size=1185894) 2024-11-28T00:46:43,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741830_1006 (size=1185894) 2024-11-28T00:46:43,633 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:46:43,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c81a02c{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-28T00:46:43,635 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@703bd321{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:46:43,635 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:46:43,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3748e205{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:46:43,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a682620{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED} 2024-11-28T00:46:43,637 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:46:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-28T00:46:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-28T00:46:43,637 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-28T00:46:43,637 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:46:43,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-28T00:46:43,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=17650064, stopped=false 2024-11-28T00:46:43,638 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:46:43,638 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,638 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:46:43,638 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=97f25515df69,43227,1732754367980 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:46:43,695 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:46:43,695 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:46:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:46:43,695 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-28T00:46:43,695 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-28T00:46:43,695 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:46:43,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:46:43,696 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '97f25515df69,38585,1732754368938' ***** 2024-11-28T00:46:43,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:46:43,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:46:43,696 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,696 INFO [Time-limited 
test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:46:43,696 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '97f25515df69,43795,1732754369084' ***** 2024-11-28T00:46:43,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:46:43,696 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,696 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:46:43,696 INFO [RS:0;97f25515df69:38585 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:46:43,697 INFO [RS:1;97f25515df69:43795 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:46:43,697 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '97f25515df69,34347,1732754369169' ***** 2024-11-28T00:46:43,697 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,697 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:46:43,697 INFO [RS:0;97f25515df69:38585 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:46:43,697 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:46:43,697 INFO [RS:1;97f25515df69:43795 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:46:43,697 INFO [RS:2;97f25515df69:34347 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:46:43,697 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:46:43,697 INFO [RS:1;97f25515df69:43795 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T00:46:43,697 INFO [RS:0;97f25515df69:38585 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T00:46:43,697 INFO [RS:2;97f25515df69:34347 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:46:43,697 INFO [RS:2;97f25515df69:34347 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T00:46:43,697 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:46:43,697 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(959): stopping server 97f25515df69,38585,1732754368938 2024-11-28T00:46:43,697 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(3091): Received CLOSE for a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:46:43,697 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(3091): Received CLOSE for af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:46:43,697 INFO [RS:0;97f25515df69:38585 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:46:43,697 INFO [RS:0;97f25515df69:38585 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;97f25515df69:38585. 
2024-11-28T00:46:43,698 DEBUG [RS:0;97f25515df69:38585 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:46:43,698 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(959): stopping server 97f25515df69,34347,1732754369169 2024-11-28T00:46:43,698 DEBUG [RS:0;97f25515df69:38585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,698 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(3091): Received CLOSE for 47c99a40beabefa004915b0134843184 2024-11-28T00:46:43,698 INFO [RS:2;97f25515df69:34347 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:46:43,698 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(959): stopping server 97f25515df69,43795,1732754369084 2024-11-28T00:46:43,698 INFO [RS:2;97f25515df69:34347 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;97f25515df69:34347. 2024-11-28T00:46:43,698 INFO [RS:1;97f25515df69:43795 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:46:43,698 INFO [RS:1;97f25515df69:43795 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;97f25515df69:43795. 
2024-11-28T00:46:43,698 DEBUG [RS:2;97f25515df69:34347 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:46:43,698 INFO [RS:0;97f25515df69:38585 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T00:46:43,698 DEBUG [RS:2;97f25515df69:34347 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,698 INFO [RS:0;97f25515df69:38585 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:46:43,698 DEBUG [RS:1;97f25515df69:43795 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:46:43,698 INFO [RS:0;97f25515df69:38585 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-28T00:46:43,698 DEBUG [RS:1;97f25515df69:43795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,698 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-28T00:46:43,698 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-28T00:46:43,698 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-28T00:46:43,698 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1325): Online Regions={a074d787aab5a43a48b3164bc3a7256f=testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f., 47c99a40beabefa004915b0134843184=hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184.} 2024-11-28T00:46:43,698 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1325): Online Regions={af0adc78f2023f6d163d04caaa3516c7=testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7.} 2024-11-28T00:46:43,698 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing af0adc78f2023f6d163d04caaa3516c7, disabling compactions & flushes 2024-11-28T00:46:43,698 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:46:43,698 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:46:43,698 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. after waiting 0 ms 2024-11-28T00:46:43,698 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-28T00:46:43,698 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:46:43,699 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-28T00:46:43,699 DEBUG [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1351): Waiting on 47c99a40beabefa004915b0134843184, a074d787aab5a43a48b3164bc3a7256f 2024-11-28T00:46:43,699 DEBUG [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-28T00:46:43,699 DEBUG [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1351): Waiting on af0adc78f2023f6d163d04caaa3516c7 2024-11-28T00:46:43,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a074d787aab5a43a48b3164bc3a7256f, disabling compactions & flushes 2024-11-28T00:46:43,699 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 
2024-11-28T00:46:43,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:46:43,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. after waiting 0 ms 2024-11-28T00:46:43,699 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:46:43,700 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-28T00:46:43,700 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-28T00:46:43,700 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-28T00:46:43,700 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T00:46:43,700 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T00:46:43,700 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=10.52 KB heapSize=17.46 KB 2024-11-28T00:46:43,703 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/af0adc78f2023f6d163d04caaa3516c7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:46:43,703 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/default/testExportExpiredSnapshot/a074d787aab5a43a48b3164bc3a7256f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:46:43,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 
2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a074d787aab5a43a48b3164bc3a7256f: Waiting for close lock at 1732754803698Running coprocessor pre-close hooks at 1732754803699 (+1 ms)Disabling compacts and flushes for region at 1732754803699Disabling writes for close at 1732754803699Writing region close event to WAL at 1732754803700 (+1 ms)Running coprocessor post-close hooks at 1732754803704 (+4 ms)Closed at 1732754803704 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for af0adc78f2023f6d163d04caaa3516c7: Waiting for close lock at 1732754803698Running coprocessor pre-close hooks at 1732754803698Disabling compacts and flushes for region at 1732754803698Disabling writes for close at 1732754803698Writing region close event to WAL at 1732754803699 (+1 ms)Running coprocessor post-close hooks at 1732754803703 (+4 ms)Closed at 1732754803704 (+1 ms) 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732754630459.a074d787aab5a43a48b3164bc3a7256f. 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732754630459.af0adc78f2023f6d163d04caaa3516c7. 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 47c99a40beabefa004915b0134843184, disabling compactions & flushes 2024-11-28T00:46:43,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. after waiting 0 ms 2024-11-28T00:46:43,704 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 
2024-11-28T00:46:43,704 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 47c99a40beabefa004915b0134843184 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-11-28T00:46:43,716 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/bbfe8598d75b4dd1bce3172edd7b10bc is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e./info:/1732754769475/DeleteFamily/seqid=0 2024-11-28T00:46:43,718 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/.tmp/l/154d1b438e6340cba58fc45a6fa1887c is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732754628011/DeleteFamily/seqid=0 2024-11-28T00:46:43,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742473_1649 (size=6387) 2024-11-28T00:46:43,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742473_1649 (size=6387) 2024-11-28T00:46:43,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742473_1649 (size=6387) 2024-11-28T00:46:43,721 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.67 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/bbfe8598d75b4dd1bce3172edd7b10bc 2024-11-28T00:46:43,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742474_1650 (size=5860) 2024-11-28T00:46:43,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742474_1650 (size=5860) 2024-11-28T00:46:43,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742474_1650 (size=5860) 2024-11-28T00:46:43,723 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/.tmp/l/154d1b438e6340cba58fc45a6fa1887c 2024-11-28T00:46:43,726 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 154d1b438e6340cba58fc45a6fa1887c 2024-11-28T00:46:43,727 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/.tmp/l/154d1b438e6340cba58fc45a6fa1887c as 
hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/l/154d1b438e6340cba58fc45a6fa1887c 2024-11-28T00:46:43,731 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 154d1b438e6340cba58fc45a6fa1887c 2024-11-28T00:46:43,731 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/l/154d1b438e6340cba58fc45a6fa1887c, entries=14, sequenceid=31, filesize=5.7 K 2024-11-28T00:46:43,732 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 47c99a40beabefa004915b0134843184 in 28ms, sequenceid=31, compaction requested=false 2024-11-28T00:46:43,735 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/acl/47c99a40beabefa004915b0134843184/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-11-28T00:46:43,736 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,736 INFO [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:46:43,736 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 47c99a40beabefa004915b0134843184: Waiting for close lock at 1732754803704Running coprocessor pre-close hooks at 1732754803704Disabling compacts and flushes for region at 1732754803704Disabling writes for close at 1732754803704Obtaining lock to block concurrent updates at 1732754803704Preparing flush snapshotting stores in 47c99a40beabefa004915b0134843184 at 1732754803704Finished memstore snapshotting hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1732754803705 (+1 ms)Flushing stores of hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 
at 1732754803705Flushing 47c99a40beabefa004915b0134843184/l: creating writer at 1732754803705Flushing 47c99a40beabefa004915b0134843184/l: appending metadata at 1732754803718 (+13 ms)Flushing 47c99a40beabefa004915b0134843184/l: closing flushed file at 1732754803718Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5704432a: reopening flushed file at 1732754803726 (+8 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 47c99a40beabefa004915b0134843184 in 28ms, sequenceid=31, compaction requested=false at 1732754803732 (+6 ms)Writing region close event to WAL at 1732754803733 (+1 ms)Running coprocessor post-close hooks at 1732754803736 (+3 ms)Closed at 1732754803736 2024-11-28T00:46:43,736 DEBUG [RS_CLOSE_REGION-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732754372299.47c99a40beabefa004915b0134843184. 2024-11-28T00:46:43,742 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/7f6382a36d96435dac4abb74670f73b6 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e./ns:/1732754769464/DeleteFamily/seqid=0 2024-11-28T00:46:43,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742475_1651 (size=5927) 2024-11-28T00:46:43,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742475_1651 (size=5927) 2024-11-28T00:46:43,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742475_1651 (size=5927) 2024-11-28T00:46:43,746 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/7f6382a36d96435dac4abb74670f73b6 2024-11-28T00:46:43,761 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/2a59fe655af344e2b7fdbd92f440fec7 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e./rep_barrier:/1732754769464/DeleteFamily/seqid=0 2024-11-28T00:46:43,763 INFO [regionserver/97f25515df69:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742476_1652 (size=5990) 2024-11-28T00:46:43,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742476_1652 (size=5990) 2024-11-28T00:46:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742476_1652 (size=5990) 2024-11-28T00:46:43,766 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/2a59fe655af344e2b7fdbd92f440fec7 2024-11-28T00:46:43,769 INFO [regionserver/97f25515df69:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,778 INFO [regionserver/97f25515df69:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,783 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/a5c04db4d06240248d91d06af555369a is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732754744538.5547ba761c10f33f1cef2584c6d5256e./table:/1732754769464/DeleteFamily/seqid=0 2024-11-28T00:46:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742477_1653 (size=6012) 2024-11-28T00:46:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742477_1653 (size=6012) 2024-11-28T00:46:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742477_1653 (size=6012) 2024-11-28T00:46:43,788 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/a5c04db4d06240248d91d06af555369a 2024-11-28T00:46:43,792 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/info/bbfe8598d75b4dd1bce3172edd7b10bc as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/info/bbfe8598d75b4dd1bce3172edd7b10bc 2024-11-28T00:46:43,795 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/info/bbfe8598d75b4dd1bce3172edd7b10bc, entries=8, sequenceid=246, filesize=6.2 K 2024-11-28T00:46:43,796 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/ns/7f6382a36d96435dac4abb74670f73b6 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/ns/7f6382a36d96435dac4abb74670f73b6 2024-11-28T00:46:43,800 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/ns/7f6382a36d96435dac4abb74670f73b6, entries=4, sequenceid=246, filesize=5.8 K 2024-11-28T00:46:43,801 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/rep_barrier/2a59fe655af344e2b7fdbd92f440fec7 as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/rep_barrier/2a59fe655af344e2b7fdbd92f440fec7 2024-11-28T00:46:43,804 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/rep_barrier/2a59fe655af344e2b7fdbd92f440fec7, entries=4, sequenceid=246, filesize=5.8 K 2024-11-28T00:46:43,805 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/.tmp/table/a5c04db4d06240248d91d06af555369a as hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/table/a5c04db4d06240248d91d06af555369a 2024-11-28T00:46:43,808 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/table/a5c04db4d06240248d91d06af555369a, entries=6, sequenceid=246, filesize=5.9 K 2024-11-28T00:46:43,809 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~10.52 KB/10771, heapSize ~17.40 KB/17816, currentSize=0 B/0 for 1588230740 in 109ms, sequenceid=246, compaction requested=false 2024-11-28T00:46:43,809 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-28T00:46:43,814 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/data/hbase/meta/1588230740/recovered.edits/249.seqid, newMaxSeqId=249, maxSeqId=214 2024-11-28T00:46:43,814 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:43,814 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:46:43,814 INFO [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-28T00:46:43,814 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732754803699Running coprocessor pre-close hooks at 1732754803699Disabling compacts and flushes for region at 1732754803699Disabling writes for close at 1732754803700 (+1 ms)Obtaining lock to block concurrent updates at 1732754803700Preparing flush snapshotting stores in 1588230740 at 1732754803700Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, 
flushsize=dataSize=10771, getHeapSize=17816, getOffHeapSize=0, getCellsCount=81 at 1732754803700Flushing stores of hbase:meta,,1.1588230740 at 1732754803701 (+1 ms)Flushing 1588230740/info: creating writer at 1732754803701Flushing 1588230740/info: appending metadata at 1732754803716 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732754803716Flushing 1588230740/ns: creating writer at 1732754803726 (+10 ms)Flushing 1588230740/ns: appending metadata at 1732754803741 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732754803741Flushing 1588230740/rep_barrier: creating writer at 1732754803749 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732754803761 (+12 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732754803761Flushing 1588230740/table: creating writer at 1732754803770 (+9 ms)Flushing 1588230740/table: appending metadata at 1732754803783 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732754803783Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18f7a26e: reopening flushed file at 1732754803791 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74f87443: reopening flushed file at 1732754803796 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b8dd4c6: reopening flushed file at 1732754803800 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61fe758c: reopening flushed file at 1732754803804 (+4 ms)Finished flush of dataSize ~10.52 KB/10771, heapSize ~17.40 KB/17816, currentSize=0 B/0 for 1588230740 in 109ms, sequenceid=246, compaction requested=false at 1732754803809 (+5 ms)Writing region close event to WAL at 1732754803811 (+2 ms)Running coprocessor post-close hooks at 1732754803814 (+3 ms)Closed at 1732754803814 2024-11-28T00:46:43,815 DEBUG [RS_CLOSE_META-regionserver/97f25515df69:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-28T00:46:43,899 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(976): stopping server 97f25515df69,34347,1732754369169; all regions closed. 2024-11-28T00:46:43,899 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(976): stopping server 97f25515df69,38585,1732754368938; all regions closed. 2024-11-28T00:46:43,899 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(976): stopping server 97f25515df69,43795,1732754369084; all regions closed. 
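
The entries above show hbase:meta being flushed one column family at a time (info, ns, rep_barrier, table), each flush writing a temporary HFile under .tmp and then committing it into the store, before the region servers report all regions closed. A minimal sketch of forcing the same kind of flush through the public client API, assuming a reachable cluster whose hbase-site.xml is on the classpath; this is illustrative only, not the test's own shutdown path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaBeforeShutdown {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; adjust quorum settings as needed.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the region server carrying hbase:meta to flush its memstores to HFiles,
            // mirroring the "Flushing ... column families" entries in the log above.
            admin.flush(TableName.META_TABLE_NAME);
        }
    }
}
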
2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741836_1012 (size=89810) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742373_1549 (size=13412) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741835_1011 (size=11922) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741836_1012 (size=89810) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741836_1012 (size=89810) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741835_1011 (size=11922) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741835_1011 (size=11922) 2024-11-28T00:46:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742373_1549 (size=13412) 2024-11-28T00:46:43,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742373_1549 (size=13412) 2024-11-28T00:46:43,907 DEBUG [RS:2;97f25515df69:34347 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs 2024-11-28T00:46:43,907 DEBUG [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs 2024-11-28T00:46:43,907 DEBUG [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs 2024-11-28T00:46:43,907 INFO [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 97f25515df69%2C43795%2C1732754369084.meta:.meta(num 1732754371635) 2024-11-28T00:46:43,907 INFO [RS:2;97f25515df69:34347 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 97f25515df69%2C34347%2C1732754369169:(num 1732754371153) 2024-11-28T00:46:43,907 INFO [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 97f25515df69%2C38585%2C1732754368938.meta:.meta(num 1732754675067) 2024-11-28T00:46:43,907 DEBUG [RS:2;97f25515df69:34347 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,907 INFO [RS:2;97f25515df69:34347 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] hbase.ChoreService(370): Chore service for: regionserver/97f25515df69:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:46:43,908 INFO [regionserver/97f25515df69:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-28T00:46:43,908 INFO [RS:2;97f25515df69:34347 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:46:43,909 INFO [RS:2;97f25515df69:34347 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34347 2024-11-28T00:46:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741834_1010 (size=17245) 2024-11-28T00:46:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741834_1010 (size=17245) 2024-11-28T00:46:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741834_1010 (size=17245) 2024-11-28T00:46:43,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073741833_1009 (size=14627) 2024-11-28T00:46:43,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073741833_1009 (size=14627) 2024-11-28T00:46:43,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073741833_1009 (size=14627) 2024-11-28T00:46:43,912 DEBUG [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs 2024-11-28T00:46:43,912 INFO [RS:1;97f25515df69:43795 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 97f25515df69%2C43795%2C1732754369084:(num 1732754371144) 2024-11-28T00:46:43,912 DEBUG [RS:1;97f25515df69:43795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,912 INFO [RS:1;97f25515df69:43795 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:46:43,913 DEBUG [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 97f25515df69%2C38585%2C1732754368938:(num 1732754371150) 2024-11-28T00:46:43,913 DEBUG [RS:0;97f25515df69:38585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] hbase.ChoreService(370): Chore service for: regionserver/97f25515df69:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
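
Each region server above reports "Moved 1 WAL file(s) to .../oldWALs" and then closes its AsyncFSWAL. A small sketch, using only the standard Hadoop FileSystem API, of listing such an archive directory to confirm which WALs were moved; the NameNode address and path are copied from the log and are only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedWals {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode and directory as they appear in the log; substitute your own values.
        conf.set("fs.defaultFS", "hdfs://localhost:44423");
        Path oldWals = new Path("/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/oldWALs");
        try (FileSystem fs = FileSystem.get(conf)) {
            for (FileStatus status : fs.listStatus(oldWals)) {
                // Print each archived WAL with its size, matching the files the log says were moved.
                System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
            }
        }
    }
}
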
2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:46:43,913 INFO [regionserver/97f25515df69:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] hbase.ChoreService(370): Chore service for: regionserver/97f25515df69:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:46:43,913 INFO [regionserver/97f25515df69:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-28T00:46:43,913 INFO [RS:1;97f25515df69:43795 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43795 2024-11-28T00:46:43,913 INFO [RS:0;97f25515df69:38585 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38585 2024-11-28T00:46:43,926 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/97f25515df69,34347,1732754369169 2024-11-28T00:46:43,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T00:46:43,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/97f25515df69,43795,1732754369084 2024-11-28T00:46:43,926 INFO [RS:2;97f25515df69:34347 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:46:43,926 INFO [RS:1;97f25515df69:43795 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:46:43,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/97f25515df69,38585,1732754368938 2024-11-28T00:46:43,926 INFO [RS:0;97f25515df69:38585 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:46:43,937 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [97f25515df69,34347,1732754369169] 2024-11-28T00:46:43,947 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/97f25515df69,34347,1732754369169 already deleted, retry=false 2024-11-28T00:46:43,947 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 97f25515df69,34347,1732754369169 expired; onlineServers=2 2024-11-28T00:46:43,947 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [97f25515df69,38585,1732754368938] 2024-11-28T00:46:43,958 DEBUG [RegionServerTracker-0 {}] 
zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/97f25515df69,38585,1732754368938 already deleted, retry=false 2024-11-28T00:46:43,958 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 97f25515df69,38585,1732754368938 expired; onlineServers=1 2024-11-28T00:46:43,958 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [97f25515df69,43795,1732754369084] 2024-11-28T00:46:43,968 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/97f25515df69,43795,1732754369084 already deleted, retry=false 2024-11-28T00:46:43,968 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 97f25515df69,43795,1732754369084 expired; onlineServers=0 2024-11-28T00:46:43,968 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '97f25515df69,43227,1732754367980' ***** 2024-11-28T00:46:43,969 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-28T00:46:43,969 INFO [M:0;97f25515df69:43227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:46:43,969 INFO [M:0;97f25515df69:43227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:46:43,969 DEBUG [M:0;97f25515df69:43227 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-28T00:46:43,969 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-28T00:46:43,969 DEBUG [M:0;97f25515df69:43227 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-28T00:46:43,969 DEBUG [master/97f25515df69:0:becomeActiveMaster-HFileCleaner.large.0-1732754370782 {}] cleaner.HFileCleaner(306): Exit Thread[master/97f25515df69:0:becomeActiveMaster-HFileCleaner.large.0-1732754370782,5,FailOnTimeoutGroup] 2024-11-28T00:46:43,969 DEBUG [master/97f25515df69:0:becomeActiveMaster-HFileCleaner.small.0-1732754370783 {}] cleaner.HFileCleaner(306): Exit Thread[master/97f25515df69:0:becomeActiveMaster-HFileCleaner.small.0-1732754370783,5,FailOnTimeoutGroup] 2024-11-28T00:46:43,970 INFO [M:0;97f25515df69:43227 {}] hbase.ChoreService(370): Chore service for: master/97f25515df69:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-28T00:46:43,970 INFO [M:0;97f25515df69:43227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:46:43,970 DEBUG [M:0;97f25515df69:43227 {}] master.HMaster(1795): Stopping service threads 2024-11-28T00:46:43,970 INFO [M:0;97f25515df69:43227 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-28T00:46:43,970 INFO [M:0;97f25515df69:43227 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-28T00:46:43,971 INFO [M:0;97f25515df69:43227 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-28T00:46:43,971 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
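
The ZooKeeper events above (NodeDeleted under /hbase/rs/<server>) are what drive the master's RegionServerTracker: as each region server's ephemeral znode disappears, the master expires that server and decrements onlineServers. A minimal, self-contained watcher in the same spirit, assuming a local ZooKeeper at 127.0.0.1:56850 as in the log; this illustrates the ephemeral-node pattern and is not HBase's actual tracker:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatcher {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                // A region server's ephemeral znode went away; a master would now expire it.
                System.out.println("RegionServer znode deleted: " + event.getPath());
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56850", 30000, watcher);
        connected.await();
        // Watch the parent for membership changes and each child for deletion.
        // These watches are one-shot; real code re-registers them after every event.
        for (String child : zk.getChildren("/hbase/rs", true)) {
            zk.exists("/hbase/rs/" + child, true);
        }
        Thread.sleep(60000); // keep the session alive long enough to observe deletions
        zk.close();
    }
}
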
2024-11-28T00:46:43,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-28T00:46:43,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:46:43,979 DEBUG [M:0;97f25515df69:43227 {}] zookeeper.ZKUtil(347): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-28T00:46:43,980 WARN [M:0;97f25515df69:43227 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-28T00:46:43,981 INFO [M:0;97f25515df69:43227 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/.lastflushedseqids 2024-11-28T00:46:43,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46245 is added to blk_1073742478_1654 (size=325) 2024-11-28T00:46:43,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45777 is added to blk_1073742478_1654 (size=325) 2024-11-28T00:46:43,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38327 is added to blk_1073742478_1654 (size=325) 2024-11-28T00:46:43,996 INFO [M:0;97f25515df69:43227 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-28T00:46:43,996 INFO [M:0;97f25515df69:43227 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-28T00:46:43,996 DEBUG [M:0;97f25515df69:43227 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T00:46:44,009 INFO [M:0;97f25515df69:43227 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:46:44,010 DEBUG [M:0;97f25515df69:43227 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:46:44,010 DEBUG [M:0;97f25515df69:43227 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T00:46:44,010 DEBUG [M:0;97f25515df69:43227 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-28T00:46:44,010 INFO [M:0;97f25515df69:43227 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1008.51 KB heapSize=1.18 MB
2024-11-28T00:46:44,010 ERROR [AsyncFSWAL-0-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
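
The NullPointerException above is a shutdown race: the WAL consumer thread asks the output for its buffered byte count after the underlying ByteBuf has already been released and the field nulled out. Purely as an illustration of the defensive pattern that tolerates such a race (not HBase's actual code or fix, and using plain Netty rather than the relocated hbase-thirdparty package), reading the field once into a local before the null check avoids the NPE:

import io.netty.buffer.ByteBuf;

public class BufferedCounter {
    // May be nulled concurrently by a close/release path, as happened in the log above.
    private volatile ByteBuf buf;

    /** Returns the readable bytes, or 0 if the buffer has already been released. */
    public int buffered() {
        ByteBuf local = this.buf; // read once so the check and the use see the same reference
        return local == null ? 0 : local.readableBytes();
    }
}
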
2024-11-28T00:46:44,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,037 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38585-0x1017ee15dea0001, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,037 DEBUG [pool-75-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34347-0x1017ee15dea0003, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43795-0x1017ee15dea0002, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:46:44,038 INFO [RS:2;97f25515df69:34347 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:46:44,038 INFO [RS:0;97f25515df69:38585 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:46:44,038 INFO [RS:1;97f25515df69:43795 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:46:44,038 INFO [RS:0;97f25515df69:38585 {}] regionserver.HRegionServer(1031): Exiting; stopping=97f25515df69,38585,1732754368938; zookeeper connection closed. 2024-11-28T00:46:44,038 INFO [RS:2;97f25515df69:34347 {}] regionserver.HRegionServer(1031): Exiting; stopping=97f25515df69,34347,1732754369169; zookeeper connection closed. 2024-11-28T00:46:44,038 INFO [RS:1;97f25515df69:43795 {}] regionserver.HRegionServer(1031): Exiting; stopping=97f25515df69,43795,1732754369084; zookeeper connection closed. 
2024-11-28T00:46:44,039 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f4fdb63 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f4fdb63 2024-11-28T00:46:44,039 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@665fb9c2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@665fb9c2 2024-11-28T00:46:44,039 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3bbe8903 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3bbe8903 2024-11-28T00:46:44,039 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-28T00:46:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:46:48,442 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T00:46:48,443 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-28T00:46:48,444 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-28T00:46:48,445 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:48,445 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:46:48,445 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:46:49,226 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:46:56,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:47:26,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
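
What follows is the test harness's automatic thread dump, printed every 60 seconds while it waits for the master process (M:0;97f25515df69:43227) to exit. A comparable periodic dump can be produced with the standard java.lang.management API; this is a generic sketch, not the ReflectionUtils.printThreadInfo helper the harness actually uses:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicThreadDump {
    public static void main(String[] args) throws Exception {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Dump every 60 seconds, mirroring "Automatic Stack Trace every 60 seconds" below.
        scheduler.scheduleAtFixedRate(() -> {
            for (ThreadInfo info : threads.dumpAllThreads(false, false)) {
                System.out.printf("Thread %d (%s): State: %s%n",
                    info.getThreadId(), info.getThreadName(), info.getThreadState());
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
        }, 0, 60, TimeUnit.SECONDS);
        TimeUnit.MINUTES.sleep(5); // keep this standalone sketch alive for a few dumps
        scheduler.shutdownNow();
    }
}
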
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;97f25515df69:43227 236 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 26 Waited count: 23 Waiting on java.lang.ref.ReferenceQueue$Lock@3090def3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@428bd3dc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5039 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@565d228d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12974 Waited count: 13714 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@636dc4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@7f64fe42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1003 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6015e7db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44423): State: TIMED_WAITING Blocked count: 1 Waited 
count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 49080 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1607 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e7e7941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44423): State: TIMED_WAITING Blocked count: 93 Waited count: 2737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44423): State: TIMED_WAITING Blocked count: 102 Waited count: 2747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44423): State: TIMED_WAITING Blocked count: 122 Waited count: 2744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44423): State: TIMED_WAITING Blocked count: 92 Waited count: 2727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44423): State: TIMED_WAITING Blocked count: 118 Waited count: 2733 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 251 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 999 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39661): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 374 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@295114a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1614 Waited count: 1674 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@14443a1a-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (1065665691) connection to localhost/127.0.0.1:44423 from jenkins): State: TIMED_WAITING Blocked count: 1497 Waited count: 1497 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (IPC Parameter Sending Thread for localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 0 Waited count: 2303 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 999 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35775): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b194364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1540 Waited count: 1666 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154-acceptor-0@394f407-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 998 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39593): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 380 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d364d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1530 Waited count: 1673 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 201 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@64823ac[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (java.util.concurrent.ThreadPoolExecutor$Worker@7aed9948[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@7e2ea54b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56850): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 249 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 390 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b9caab2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:56850):): State: WAITING Blocked count: 2 Waited count: 496 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bf82e3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3bfa18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:44423): State: TIMED_WAITING Blocked count: 14 Waited count: 517 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@362e7529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 371 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:56850)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47693e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 29 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@13d9870c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 8 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@561b2f63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 226 Waited count: 869 Waiting on java.util.concurrent.Semaphore$NonfairSync@1383ec33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 18 Waited count: 166 Waiting on java.util.concurrent.Semaphore$NonfairSync@1d94a981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227): State: WAITING Blocked count: 60 Waited count: 10396 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@422abcd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41ab7c26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@794b1d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6e10b2be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@c29df0e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f9c8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 85 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;97f25515df69:43227): State: TIMED_WAITING Blocked count: 12 Waited count: 4348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1114/0x00007f8a20f6d950.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@26d7e4af): State: TIMED_WAITING Blocked count: 0 Waited count: 165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4933 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 107 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 85 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 418 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49245 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b9a079d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 
(regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ff45fdf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3078ff04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e4cc42c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (LeaseRenewer:jenkins.hfs.2@localhost:44423): State: TIMED_WAITING Blocked count: 14 Waited count: 518 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 501 
(LeaseRenewer:jenkins.hfs.1@localhost:44423): State: TIMED_WAITING Blocked count: 14 Waited count: 518 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (LeaseRenewer:jenkins.hfs.0@localhost:44423): State: TIMED_WAITING Blocked count: 14 Waited count: 517 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49072 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 755 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1187 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47e4a9ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1295 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@6e68a4a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1878 (region-location-3): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1879 (region-location-4): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2189 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 221 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3410 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10286 (ForkJoinPool.commonPool-worker-7): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10287 (ForkJoinPool.commonPool-worker-8): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10285 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11451 (AsyncFSWAL-1-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@279d3c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11454 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-28T00:47:56,884 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-11-28T00:48:26,884 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;97f25515df69:43227
231 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 26 Waited count: 23 Waiting on java.lang.ref.ReferenceQueue$Lock@3090def3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread
4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@428bd3dc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@411d92b3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12974 Waited count: 13715 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) 
app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@636dc4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@7f64fe42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1123 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6015e7db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44423): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55004 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1607 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e7e7941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44423): State: TIMED_WAITING Blocked count: 106 Waited count: 2797 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44423): State: TIMED_WAITING Blocked count: 103 Waited count: 2808 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44423): State: TIMED_WAITING Blocked count: 122 Waited count: 2804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44423): State: TIMED_WAITING Blocked count: 92 Waited count: 2787 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44423): State: TIMED_WAITING Blocked count: 122 Waited count: 2793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 281 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39661): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 394 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@295114a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1636 Waited count: 1727 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@14443a1a-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (1065665691) connection to localhost/127.0.0.1:44423 from jenkins): State: TIMED_WAITING Blocked count: 1537 Waited count: 1537 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (IPC Parameter Sending Thread for localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 0 Waited count: 2343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35775): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b194364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1562 Waited count: 1713 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 562 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154-acceptor-0@394f407-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39593): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 400 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d364d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1550 Waited count: 1713 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 201 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@64823ac[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (java.util.concurrent.ThreadPoolExecutor$Worker@7aed9948[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@7e2ea54b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56850): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 279 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 395 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b9caab2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:56850):): State: WAITING Blocked count: 2 Waited count: 501 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bf82e3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 529 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3bfa18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@362e7529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:56850)): State: RUNNABLE 
Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47693e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 29 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@13d9870c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 8 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@561b2f63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 226 Waited count: 869 Waiting on java.util.concurrent.Semaphore$NonfairSync@1383ec33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 18 Waited count: 166 Waiting on java.util.concurrent.Semaphore$NonfairSync@1d94a981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227): State: WAITING Blocked count: 60 Waited count: 10396 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@422abcd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41ab7c26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@794b1d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43227): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6e10b2be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@c29df0e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f9c8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 85 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;97f25515df69:43227): State: TIMED_WAITING Blocked count: 12 Waited count: 4348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1114/0x00007f8a20f6d950.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@26d7e4af): State: TIMED_WAITING Blocked count: 0 Waited count: 185 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 5532 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 107 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 85 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e12449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 418 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55247 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b9a079d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ff45fdf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3078ff04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e4cc42c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 512 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55075 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 755 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47e4a9ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1295 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on 
java.util.TaskQueue@6e68a4a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1878 (region-location-3): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1879 (region-location-4): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2189 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 221 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3410 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10286 (ForkJoinPool.commonPool-worker-7): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10287 (ForkJoinPool.commonPool-worker-8): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11451 (AsyncFSWAL-1-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@279d3c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11454 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-28T00:48:56,884 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): 
NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-28T00:49:26,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-28T00:49:29,308 DEBUG [master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 2024-11-28T00:49:29,309 DEBUG [master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-28T00:49:37,238 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;97f25515df69:43227 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 26 Waited count: 23 Waiting on java.lang.ref.ReferenceQueue$Lock@3090def3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal
Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@428bd3dc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6238 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 
(surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 63 Waiting on java.util.concurrent.CountDownLatch$Sync@6afb4bb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12974 Waited count: 13716 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) 
app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@636dc4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@7f64fe42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1243 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6015e7db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44423): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 60928 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1607 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e7e7941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44423): State: TIMED_WAITING Blocked count: 120 Waited count: 2857 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44423): State: TIMED_WAITING Blocked count: 103 Waited count: 2868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44423): State: TIMED_WAITING Blocked count: 123 Waited count: 2864 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44423): State: TIMED_WAITING Blocked count: 94 Waited count: 2847 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44423): State: TIMED_WAITING Blocked count: 124 Waited count: 2853 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 311 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1239 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39661): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 414 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@295114a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1658 Waited count: 1778 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@14443a1a-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (1065665691) connection to localhost/127.0.0.1:44423 from jenkins): State: TIMED_WAITING Blocked count: 1577 Waited count: 1577 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (IPC Parameter Sending Thread for localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 0 Waited count: 2383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1239 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35775): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 421 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b194364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1582 Waited count: 1762 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 622 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154-acceptor-0@394f407-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39593): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 420 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d364d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1570 Waited count: 1753 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 201 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7367a1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35029fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@64823ac[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (java.util.concurrent.ThreadPoolExecutor$Worker@7aed9948[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3cdb8729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@7e2ea54b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56850): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 309 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 399 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b9caab2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:56850):): State: WAITING Blocked count: 2 Waited count: 505 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bf82e3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 
243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3bfa18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@362e7529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:56850)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47693e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 29 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@13d9870c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 8 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@561b2f63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 226 Waited count: 869 Waiting on java.util.concurrent.Semaphore$NonfairSync@1383ec33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 18 Waited count: 166 Waiting on java.util.concurrent.Semaphore$NonfairSync@1d94a981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227): State: WAITING Blocked count: 60 Waited count: 10396 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@422abcd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41ab7c26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@794b1d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6e10b2be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@c29df0e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f9c8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 85 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;97f25515df69:43227): State: TIMED_WAITING Blocked count: 12 Waited count: 4348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1114/0x00007f8a20f6d950.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@26d7e4af): State: TIMED_WAITING Blocked count: 0 Waited count: 205 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6131 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 107 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 85 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 
(Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e12449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 418 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61250 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b9a079d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ff45fdf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 
(regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3078ff04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e4cc42c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 512 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61077 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 755 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1199 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47e4a9ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1295 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@6e68a4a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1878 (region-location-3): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1879 (region-location-4): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2189 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 221 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3410 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10287 (ForkJoinPool.commonPool-worker-8): State: TIMED_WAITING Blocked count: 0 Waited count: 84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11451 (AsyncFSWAL-1-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@279d3c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11455 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11460 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-28T00:49:56,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-28T00:50:26,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;97f25515df69,43227,1732754367980
230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 26 Waited count: 23 Waiting on java.lang.ref.ReferenceQueue$Lock@3090def3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@428bd3dc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 69 Waiting on java.util.concurrent.CountDownLatch$Sync@d9ee685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12974 Waited count: 13717 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) 
app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@636dc4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@7f64fe42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1363 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6015e7db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44423): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 66856 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1607 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e7e7941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44423): State: TIMED_WAITING Blocked count: 120 Waited count: 2918 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44423): State: TIMED_WAITING Blocked count: 103 Waited count: 2929 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44423): State: TIMED_WAITING Blocked count: 123 Waited count: 2924 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44423): State: TIMED_WAITING Blocked count: 94 Waited count: 2907 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44423): State: TIMED_WAITING Blocked 
count: 140 Waited count: 2913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 
(pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1359 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39661): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@295114a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1680 Waited count: 1825 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@14443a1a-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (1065665691) connection to localhost/127.0.0.1:44423 from jenkins): State: TIMED_WAITING Blocked count: 1618 Waited count: 1618 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (IPC Parameter Sending Thread for localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 0 Waited count: 2427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1359 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35775): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 441 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b194364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1603 Waited count: 1811 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154-acceptor-0@394f407-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1358 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39593): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d364d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1590 Waited count: 1793 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 201 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7367a1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35029fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@64823ac[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (java.util.concurrent.ThreadPoolExecutor$Worker@7aed9948[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3cdb8729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@7e2ea54b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 
(NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56850): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b9caab2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:56850):): State: WAITING Blocked count: 2 Waited count: 509 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bf82e3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 537 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3bfa18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@362e7529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:56850)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47693e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 29 Waited count: 78 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@13d9870c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 
(NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 8 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@561b2f63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 226 Waited count: 869 Waiting on java.util.concurrent.Semaphore$NonfairSync@1383ec33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 18 Waited count: 166 Waiting on java.util.concurrent.Semaphore$NonfairSync@1d94a981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227): State: WAITING Blocked count: 60 Waited count: 10396 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@422abcd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41ab7c26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@794b1d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6e10b2be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@c29df0e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f9c8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 85 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;97f25515df69:43227): State: TIMED_WAITING Blocked count: 12 Waited count: 4348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1114/0x00007f8a20f6d950.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@26d7e4af): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6731 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 107 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 85 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e12449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 418 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b9a079d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ff45fdf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3078ff04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e4cc42c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 512 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67079 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 756 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1205 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47e4a9ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1295 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@6e68a4a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1878 (region-location-3): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1879 (region-location-4): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2189 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 221 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3410 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.ForkJoinPool@78362153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11451 (AsyncFSWAL-1-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@279d3c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11455 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11460 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-28T00:50:56,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:51:26,886 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:51:44,012 DEBUG [M:0;97f25515df69:43227 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732754803996Disabling compacts and flushes for region at 1732754803996Disabling writes for close at 1732754804010 (+14 ms)Obtaining lock to block concurrent updates at 1732754804010Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732754804010Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1032713, getHeapSize=1239544, getOffHeapSize=0, getCellsCount=2727 at 1732754804010Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732755104011 (+300001 ms) 2024-11-28T00:51:44,012 WARN [M:0;97f25515df69:43227 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4667, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4667, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-28T00:51:44,014 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T00:51:44,016 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-28T00:51:44,016 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-28T00:51:44,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 2024-11-28T00:51:44,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:51:44,019 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:51:44,019 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 2024-11-28T00:51:44,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;97f25515df69:43227 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 26 Waited count: 23 Waiting on java.lang.ref.ReferenceQueue$Lock@3090def3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 29 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 36 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@428bd3dc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 75 Waiting on java.util.concurrent.CountDownLatch$Sync@3d88d20c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12974 Waited count: 13718 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@636dc4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@7f64fe42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1483 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:36369}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6015e7db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44423): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 72796 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1607 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e7e7941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44423): State: TIMED_WAITING Blocked count: 120 Waited count: 2978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44423): State: TIMED_WAITING Blocked count: 103 Waited count: 2989 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44423): State: TIMED_WAITING Blocked count: 124 Waited count: 2985 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44423): State: TIMED_WAITING Blocked count: 94 Waited count: 2967 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44423): State: TIMED_WAITING Blocked count: 141 Waited count: 2973 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 371 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:36565}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1479 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39661): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@295114a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1700 Waited count: 1867 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39661): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@14443a1a-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:43333}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (1065665691) connection to localhost/127.0.0.1:44423 from jenkins): State: TIMED_WAITING Blocked count: 1673 Waited count: 1673 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (IPC Parameter Sending Thread for localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 0 Waited count: 2485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1479 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35775): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 461 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b194364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1623 Waited count: 1851 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 763 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 761 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35775): State: TIMED_WAITING Blocked count: 0 Waited count: 745 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f8a2042a800.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154-acceptor-0@394f407-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:41329}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1478 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39593): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 460 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d364d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423): State: TIMED_WAITING Blocked count: 1610 Waited count: 1833 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 745 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC 
Server handler 1 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39593): State: TIMED_WAITING Blocked count: 0 Waited count: 744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 201 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847): State: 
TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7367a1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35029fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@64823ac[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (java.util.concurrent.ThreadPoolExecutor$Worker@7aed9948[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3cdb8729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@7e2ea54b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56850): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 369 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 408 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b9caab2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:56850):): State: WAITING Blocked count: 2 Waited count: 514 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bf82e3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 542 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3bfa18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@362e7529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:56850)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47693e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 29 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@13d9870c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 8 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 101 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18c985b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@561b2f63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 226 Waited count: 869 Waiting on java.util.concurrent.Semaphore$NonfairSync@1383ec33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 18 Waited count: 166 Waiting on java.util.concurrent.Semaphore$NonfairSync@1d94a981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43227): State: WAITING Blocked count: 60 Waited count: 10396 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@422abcd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d45f3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41ab7c26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@794b1d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6e10b2be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43227): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@c29df0e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f9c8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 85 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;97f25515df69:43227): State: TIMED_WAITING Blocked count: 12 Waited count: 4349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1442/0x00007f8a2120d650.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) 
app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/97f25515df69:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@26d7e4af): State: TIMED_WAITING Blocked count: 0 Waited count: 245 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7330 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 
(MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 107 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 85 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e12449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 418 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73254 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b9a079d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ff45fdf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3078ff04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/97f25515df69:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e4cc42c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 512 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73081 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 
(MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1211 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47e4a9ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1294 (RPCClient-NioEventLoopGroup-6-13):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1295 (RPCClient-NioEventLoopGroup-6-14):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1649 (Container metrics unregistration):
  State: WAITING
  Blocked count: 11
  Waited count: 69
  Waiting on java.util.TaskQueue@6e68a4a5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1878 (region-location-3):
  State: WAITING
  Blocked count: 3
  Waited count: 10
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1879 (region-location-4):
  State: WAITING
  Blocked count: 3
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e19897
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2189 (ForkJoinPool.commonPool-worker-4):
  State: WAITING
  Blocked count: 0
  Waited count: 221
  Waiting on java.util.concurrent.ForkJoinPool@78362153
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 3410 (ForkJoinPool.commonPool-worker-5):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 505
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 11451 (AsyncFSWAL-1-hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData-prefix:97f25515df69,43227,1732754367980):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@279d3c82
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11460 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 13
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 11463 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11464 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1427/0x00007f8a21206cf0.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-28T00:51:48,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730 after 4001ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T00:51:49,014 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-11-28T00:51:49,015 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-28T00:51:49,015 INFO [M:0;97f25515df69:43227 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-28T00:51:49,016 INFO [M:0;97f25515df69:43227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43227
2024-11-28T00:51:49,017 INFO [M:0;97f25515df69:43227 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-28T00:51:49,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44423/user/jenkins/test-data/d2672214-a379-a133-499a-10133b1a668e/MasterData/WALs/97f25515df69,43227,1732754367980/97f25515df69%2C43227%2C1732754367980.1732754369730
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 12 more
2024-11-28T00:51:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-28T00:51:49,129 INFO [M:0;97f25515df69:43227 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-28T00:51:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43227-0x1017ee15dea0000, quorum=127.0.0.1:56850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-28T00:51:49,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@367f3488{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-28T00:51:49,169 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-28T00:51:49,169 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-28T00:51:49,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-28T00:51:49,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED}
2024-11-28T00:51:49,171 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
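The ERROR above from wal.AbstractFSWAL(2118) names the "hbase.wal.async.wait.on.shutdown.seconds" setting as the knob for the 5-second wait on closing the async WAL writer. Below is a minimal, hypothetical Java sketch of raising that wait in a test configuration; the property key is taken from the log message itself, while the surrounding class, the 30-second value, and the use of HBaseConfiguration are illustrative assumptions rather than anything recorded in this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static void main(String[] args) {
        // Load the usual HBase configuration (hbase-default.xml plus any hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // The log message suggests increasing this wait when the async writer close outlasts the default 5 seconds.
        // 30 is an illustrative value, not a recommendation taken from this log.
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
        System.out.println("wait on shutdown = " + conf.get("hbase.wal.async.wait.on.shutdown.seconds"));
      }
    }

The same key could equally be set in hbase-site.xml; the programmatic form is shown only because test code typically builds its Configuration this way.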
2024-11-28T00:51:49,171 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-28T00:51:49,171 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-823006435-172.17.0.3-1732754361847 (Datanode Uuid 04e962cb-71ed-4ca5-b582-c86dc7adf5d6) service to localhost/127.0.0.1:44423
2024-11-28T00:51:49,171 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-28T00:51:49,173 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data5/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data6/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,174 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-28T00:51:49,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b9a68a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-28T00:51:49,177 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-28T00:51:49,177 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-28T00:51:49,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-28T00:51:49,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED}
2024-11-28T00:51:49,179 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-28T00:51:49,179 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-28T00:51:49,179 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-823006435-172.17.0.3-1732754361847 (Datanode Uuid cc7e1181-0799-4a46-b365-fb23278a2771) service to localhost/127.0.0.1:44423
2024-11-28T00:51:49,179 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-28T00:51:49,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data3/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,181 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data4/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,181 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-28T00:51:49,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64986ec5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-28T00:51:49,183 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-28T00:51:49,183 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-28T00:51:49,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-28T00:51:49,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED}
2024-11-28T00:51:49,185 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-28T00:51:49,185 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-28T00:51:49,185 WARN [BP-823006435-172.17.0.3-1732754361847 heartbeating to localhost/127.0.0.1:44423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-823006435-172.17.0.3-1732754361847 (Datanode Uuid 82520e50-734f-4a6f-ad48-6d1a83946010) service to localhost/127.0.0.1:44423
2024-11-28T00:51:49,185 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-28T00:51:49,186 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data1/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,186 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/cluster_da3feac4-d904-15e8-8fcd-80d62ee10a4e/data/data2/current/BP-823006435-172.17.0.3-1732754361847 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-28T00:51:49,186 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-28T00:51:49,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5996a1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-28T00:51:49,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-28T00:51:49,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-28T00:51:49,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-28T00:51:49,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/d850fcfc-f553-4163-5289-181f39be34c3/hadoop.log.dir/,STOPPED}
2024-11-28T00:51:49,207 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-28T00:51:49,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
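The closing lines come from the test harness tearing down its embedded cluster: zookeeper.MiniZooKeeperCluster reports the MiniZK shutdown and hbase.HBaseTestingUtil reports "Minicluster is down". As a rough sketch of the lifecycle that produces these messages, assuming HBaseTestingUtil keeps the startMiniCluster/shutdownMiniCluster methods of its predecessor HBaseTestingUtility (only the class name appears in the log; the calls below are an assumption, not taken from this run):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        // Test helper named in the final log line above.
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Assumed API: starts MiniZK, MiniDFS and an in-process HBase master/regionserver.
        util.startMiniCluster();
        try {
          // ... test body would go here ...
        } finally {
          // Assumed API: tears the whole minicluster down, ending with the "Minicluster is down" message.
          util.shutdownMiniCluster();
        }
      }
    }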