2024-11-15 11:01:06,588 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-11-15 11:01:06,635 main DEBUG Took 0.043227 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-15 11:01:06,636 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-15 11:01:06,637 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-15 11:01:06,639 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-15 11:01:06,641 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,652 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-15 11:01:06,673 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,677 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,678 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,680 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,681 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,681 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,682 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,683 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,684 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,685 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,686 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,686 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,691 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,691 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,692 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,694 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,697 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,697 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 11:01:06,698 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,698 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-15 11:01:06,700 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 11:01:06,702 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-15 11:01:06,704 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-15 11:01:06,704 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-15 11:01:06,706 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-15 11:01:06,706 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-15 11:01:06,719 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-15 11:01:06,723 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-15 11:01:06,726 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-15 11:01:06,726 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-15 11:01:06,727 main DEBUG createAppenders(={Console})
2024-11-15 11:01:06,728 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized
2024-11-15 11:01:06,728 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-11-15 11:01:06,728 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK.
2024-11-15 11:01:06,729 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-15 11:01:06,730 main DEBUG OutputStream closed
2024-11-15 11:01:06,730 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-15 11:01:06,730 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-15 11:01:06,731 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK
2024-11-15 11:01:06,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-15 11:01:06,889 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-15 11:01:06,894 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-15 11:01:06,896 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-15 11:01:06,900 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-15 11:01:06,901 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-15 11:01:06,901 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-15 11:01:06,901 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-15 11:01:06,902 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-15 11:01:06,902 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-15 11:01:06,903 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-15 11:01:06,903 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-15 11:01:06,904 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-15 11:01:06,904 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-15 11:01:06,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-15 11:01:06,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-15 11:01:06,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-15 11:01:06,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-15 11:01:06,912 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-15 11:01:06,913 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null
2024-11-15 11:01:06,913 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-15 11:01:06,914 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK.
2024-11-15T11:01:06,934 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-11-15 11:01:06,937 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-15 11:01:06,938 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-15T11:01:07,498 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009
2024-11-15T11:01:07,499 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-11-15T11:01:07,580 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-15T11:01:07,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-15T11:01:07,958 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95, deleteOnExit=true
2024-11-15T11:01:07,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-15T11:01:07,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/test.cache.data in system properties and HBase conf
2024-11-15T11:01:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.tmp.dir in system properties and HBase conf
2024-11-15T11:01:07,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir in system properties and HBase conf
2024-11-15T11:01:07,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-15T11:01:07,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-15T11:01:07,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-15T11:01:08,064 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-15T11:01:08,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-15T11:01:08,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-15T11:01:08,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-15T11:01:08,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-15T11:01:08,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-15T11:01:08,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-15T11:01:08,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-15T11:01:08,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-15T11:01:08,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-15T11:01:08,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/nfs.dump.dir in system properties and HBase conf
2024-11-15T11:01:08,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir in system properties and HBase conf
2024-11-15T11:01:08,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-15T11:01:08,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-15T11:01:08,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-15T11:01:09,132 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-15T11:01:09,247 INFO [Time-limited test {}] log.Log(170): Logging initialized @3749ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-15T11:01:09,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:01:09,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T11:01:09,510 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T11:01:09,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T11:01:09,514 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T11:01:09,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:01:09,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@650dba7d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE}
2024-11-15T11:01:09,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50f4ffa2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T11:01:09,825 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@397082b1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-43319-hadoop-hdfs-3_4_1-tests_jar-_-any-14229506708713373507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T11:01:09,839 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}
2024-11-15T11:01:09,842 INFO [Time-limited test {}] server.Server(415): Started @4346ms
2024-11-15T11:01:10,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:01:10,397 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T11:01:10,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T11:01:10,405 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T11:01:10,405 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T11:01:10,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52f556e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE}
2024-11-15T11:01:10,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3177f68b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T11:01:10,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13c554f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-41363-hadoop-hdfs-3_4_1-tests_jar-_-any-1933806224884731934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:01:10,539 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}
2024-11-15T11:01:10,539 INFO [Time-limited test {}] server.Server(415): Started @5043ms
2024-11-15T11:01:10,610 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T11:01:10,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:01:10,832 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T11:01:10,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T11:01:10,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T11:01:10,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T11:01:10,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35cd15ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE}
2024-11-15T11:01:10,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1524f378{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T11:01:11,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53c777d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-35651-hadoop-hdfs-3_4_1-tests_jar-_-any-9904349935198793217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:01:11,047 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}
2024-11-15T11:01:11,048 INFO [Time-limited test {}] server.Server(415): Started @5552ms
2024-11-15T11:01:11,051 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T11:01:11,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T11:01:11,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T11:01:11,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T11:01:11,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T11:01:11,222 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-15T11:01:11,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19d73582{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE}
2024-11-15T11:01:11,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76d904c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T11:01:11,266 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,269 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,280 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,289 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@469f0591{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-38679-hadoop-hdfs-3_4_1-tests_jar-_-any-3896035679780047110/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:01:11,441 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}
2024-11-15T11:01:11,442 INFO [Time-limited test {}] server.Server(415): Started @5945ms
2024-11-15T11:01:11,444 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T11:01:11,489 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-15T11:01:11,494 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-15T11:01:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x755be86e76098866 with lease ID 0x91ad82a56e3c2606: Processing first storage report for DS-c18c3b04-ca00-443a-942f-db0d3b7668f3 from datanode DatanodeRegistration(127.0.0.1:42399, datanodeUuid=a15cdfc4-77ba-4018-a9bb-fe1eae10158c, infoPort=43239, infoSecurePort=0, ipcPort=34249, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,597 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x755be86e76098866 with lease ID 0x91ad82a56e3c2606: from storage DS-c18c3b04-ca00-443a-942f-db0d3b7668f3 node DatanodeRegistration(127.0.0.1:42399, datanodeUuid=a15cdfc4-77ba-4018-a9bb-fe1eae10158c, infoPort=43239, infoSecurePort=0, ipcPort=34249, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,598 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x275761005878f01b with lease ID 0x91ad82a56e3c2605: Processing first storage report for DS-d2c3abd2-6c40-4063-abef-c89772545457 from datanode DatanodeRegistration(127.0.0.1:38361, datanodeUuid=2ecd9689-13bb-4b14-ae24-35c85d3af906, infoPort=33185, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,598 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x275761005878f01b with lease ID 0x91ad82a56e3c2605: from storage DS-d2c3abd2-6c40-4063-abef-c89772545457 node DatanodeRegistration(127.0.0.1:38361, datanodeUuid=2ecd9689-13bb-4b14-ae24-35c85d3af906, infoPort=33185, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,598 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x755be86e76098866 with lease ID 0x91ad82a56e3c2606: Processing first storage report for DS-7a2a8a8d-2b92-403e-b4ca-27b8e307a0b6 from datanode DatanodeRegistration(127.0.0.1:42399, datanodeUuid=a15cdfc4-77ba-4018-a9bb-fe1eae10158c, infoPort=43239, infoSecurePort=0, ipcPort=34249, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x755be86e76098866 with lease ID 0x91ad82a56e3c2606: from storage DS-7a2a8a8d-2b92-403e-b4ca-27b8e307a0b6 node DatanodeRegistration(127.0.0.1:42399, datanodeUuid=a15cdfc4-77ba-4018-a9bb-fe1eae10158c, infoPort=43239, infoSecurePort=0, ipcPort=34249, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,599 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x275761005878f01b with lease ID 0x91ad82a56e3c2605: Processing first storage report for DS-318955c9-d701-4b91-bf99-a9a80c3ea06e from datanode DatanodeRegistration(127.0.0.1:38361, datanodeUuid=2ecd9689-13bb-4b14-ae24-35c85d3af906, infoPort=33185, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x275761005878f01b with lease ID 0x91ad82a56e3c2605: from storage DS-318955c9-d701-4b91-bf99-a9a80c3ea06e node DatanodeRegistration(127.0.0.1:38361, datanodeUuid=2ecd9689-13bb-4b14-ae24-35c85d3af906, infoPort=33185, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,703 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,706 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836/current, will proceed with Du for space computation calculation,
2024-11-15T11:01:11,774 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-15T11:01:11,781 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x37199aff171b028e with lease ID 0x91ad82a56e3c2607: Processing first storage report for DS-12ff82f3-963a-47be-a22e-83c8f228f778 from datanode DatanodeRegistration(127.0.0.1:39395, datanodeUuid=80e11d40-360e-46ae-a031-e0db59912a7f, infoPort=36145, infoSecurePort=0, ipcPort=43923, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,781 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37199aff171b028e with lease ID 0x91ad82a56e3c2607: from storage DS-12ff82f3-963a-47be-a22e-83c8f228f778 node DatanodeRegistration(127.0.0.1:39395, datanodeUuid=80e11d40-360e-46ae-a031-e0db59912a7f, infoPort=36145, infoSecurePort=0, ipcPort=43923, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x37199aff171b028e with lease ID 0x91ad82a56e3c2607: Processing first storage report for DS-d055fb2a-cf77-4e4d-b529-bc6d5336ef85 from datanode DatanodeRegistration(127.0.0.1:39395, datanodeUuid=80e11d40-360e-46ae-a031-e0db59912a7f, infoPort=36145, infoSecurePort=0, ipcPort=43923, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836)
2024-11-15T11:01:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37199aff171b028e with lease ID 0x91ad82a56e3c2607: from storage DS-d055fb2a-cf77-4e4d-b529-bc6d5336ef85 node DatanodeRegistration(127.0.0.1:39395, datanodeUuid=80e11d40-360e-46ae-a031-e0db59912a7f, infoPort=36145, infoSecurePort=0, ipcPort=43923, storageInfo=lv=-57;cid=testClusterID;nsid=2137082013;c=1731668468836), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-15T11:01:11,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009
2024-11-15T11:01:12,088 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/zookeeper_0, clientPort=55607, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-15T11:01:12,100 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55607
2024-11-15T11:01:12,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
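[Editor's note] The startup above corresponds to the logged StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1, ...}. A minimal, hypothetical sketch of how a test might request such a minicluster, assuming the HBaseTestingUtil / StartMiniClusterOption classes referenced in the log (exact builder method names and signatures may differ between HBase versions):

```java
// Hypothetical sketch only; not taken from the test source in this log.
// Assumes org.apache.hadoop.hbase.HBaseTestingUtil and StartMiniClusterOption
// as named in the log lines above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the option values printed in the log above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(3)  // three region servers
        .numDataNodes(3)      // three HDFS datanodes
        .numZkServers(1)      // one mini ZooKeeper server
        .build();
    util.startMiniCluster(option); // brings up DFS, ZK, master and region servers
    try {
      // ... test logic against util.getConnection() would go here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```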
2024-11-15T11:01:12,120 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:12,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741825_1001 (size=7)
2024-11-15T11:01:12,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741825_1001 (size=7)
2024-11-15T11:01:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741825_1001 (size=7)
2024-11-15T11:01:12,840 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 with version=8
2024-11-15T11:01:12,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/hbase-staging
2024-11-15T11:01:12,976 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-15T11:01:13,326 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4cdab6d79b20:0 server-side Connection retries=45
2024-11-15T11:01:13,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:13,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:13,350 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-15T11:01:13,351 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:13,351 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-15T11:01:13,521 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-15T11:01:13,592 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-15T11:01:13,604 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-15T11:01:13,609 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-15T11:01:13,641 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 13501 (auto-detected)
2024-11-15T11:01:13,642 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-15T11:01:13,669 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33465
2024-11-15T11:01:13,699 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33465 connecting to ZooKeeper ensemble=127.0.0.1:55607
2024-11-15T11:01:13,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334650x0, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-15T11:01:13,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33465-0x101ab9766630000 connected
2024-11-15T11:01:13,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:13,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:13,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-15T11:01:13,810 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7, hbase.cluster.distributed=false
2024-11-15T11:01:13,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-15T11:01:13,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33465
2024-11-15T11:01:13,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33465
2024-11-15T11:01:13,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33465
2024-11-15T11:01:13,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33465
2024-11-15T11:01:13,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33465
2024-11-15T11:01:14,008 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4cdab6d79b20:0 server-side Connection retries=45
2024-11-15T11:01:14,011 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,011 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,011 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-15T11:01:14,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-15T11:01:14,015 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-15T11:01:14,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-15T11:01:14,020 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45865
2024-11-15T11:01:14,022 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45865 connecting to ZooKeeper ensemble=127.0.0.1:55607
2024-11-15T11:01:14,023 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458650x0, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-15T11:01:14,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45865-0x101ab9766630001 connected
2024-11-15T11:01:14,032 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-15T11:01:14,037 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-15T11:01:14,046 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-15T11:01:14,049 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-15T11:01:14,056 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-15T11:01:14,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45865
2024-11-15T11:01:14,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45865
2024-11-15T11:01:14,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45865
2024-11-15T11:01:14,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45865
2024-11-15T11:01:14,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45865
2024-11-15T11:01:14,098 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4cdab6d79b20:0 server-side Connection retries=45
2024-11-15T11:01:14,098 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,098 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,099 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-15T11:01:14,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-15T11:01:14,100 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-15T11:01:14,100 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-15T11:01:14,101 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36113
2024-11-15T11:01:14,103 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36113 connecting to ZooKeeper ensemble=127.0.0.1:55607
2024-11-15T11:01:14,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,107 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361130x0, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-15T11:01:14,115 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361130x0, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-15T11:01:14,115 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-15T11:01:14,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36113-0x101ab9766630002 connected
2024-11-15T11:01:14,121 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-15T11:01:14,122 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-15T11:01:14,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-15T11:01:14,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36113
2024-11-15T11:01:14,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36113
2024-11-15T11:01:14,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36113
2024-11-15T11:01:14,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36113
2024-11-15T11:01:14,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36113
2024-11-15T11:01:14,153 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4cdab6d79b20:0 server-side Connection retries=45
2024-11-15T11:01:14,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,153 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-15T11:01:14,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-15T11:01:14,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-15T11:01:14,154 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-15T11:01:14,154 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-15T11:01:14,155 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39207
2024-11-15T11:01:14,158 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39207 connecting to ZooKeeper ensemble=127.0.0.1:55607
2024-11-15T11:01:14,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T11:01:14,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392070x0, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-15T11:01:14,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:392070x0, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-15T11:01:14,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39207-0x101ab9766630003 connected
2024-11-15T11:01:14,175 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-15T11:01:14,179 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-15T11:01:14,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-15T11:01:14,183 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-15T11:01:14,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39207
2024-11-15T11:01:14,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39207
2024-11-15T11:01:14,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39207
2024-11-15T11:01:14,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39207
2024-11-15T11:01:14,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39207
2024-11-15T11:01:14,210 DEBUG [M:0;4cdab6d79b20:33465 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4cdab6d79b20:33465
2024-11-15T11:01:14,211 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4cdab6d79b20,33465,1731668473053
2024-11-15T11:01:14,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,226 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4cdab6d79b20,33465,1731668473053
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:01:14,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:01:14,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-15T11:01:14,264 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-15T11:01:14,265 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4cdab6d79b20,33465,1731668473053 from backup master directory
2024-11-15T11:01:14,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4cdab6d79b20,33465,1731668473053
2024-11-15T11:01:14,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-15T11:01:14,270 WARN [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be
cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:01:14,270 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:14,273 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T11:01:14,274 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T11:01:14,344 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/hbase.id] with ID: 7eec1d4a-af98-41cc-ba5a-34a7b19499cc 2024-11-15T11:01:14,344 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.tmp/hbase.id 2024-11-15T11:01:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:01:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:01:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741826_1002 (size=42) 2024-11-15T11:01:14,364 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.tmp/hbase.id]:[hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/hbase.id] 2024-11-15T11:01:14,415 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T11:01:14,422 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T11:01:14,443 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 
2024-11-15T11:01:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:14,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:14,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:14,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:01:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:01:14,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741827_1003 (size=196) 2024-11-15T11:01:14,507 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:01:14,510 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T11:01:14,531 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:01:14,537 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T11:01:14,569 WARN [IPC Server handler 2 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:01:14,570 WARN [IPC Server handler 2 on default port 37487 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:01:14,570 WARN [IPC Server handler 2 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:01:14,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:01:14,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:01:14,611 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/data/master/store 2024-11-15T11:01:14,631 WARN [IPC Server handler 0 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:01:14,631 WARN [IPC Server handler 0 on default port 37487 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:01:14,631 WARN [IPC Server handler 0 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:01:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:01:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:01:14,648 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-15T11:01:14,652 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:14,653 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:01:14,653 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:01:14,654 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:01:14,656 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:01:14,656 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T11:01:14,656 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:01:14,658 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731668474653Disabling compacts and flushes for region at 1731668474653Disabling writes for close at 1731668474656 (+3 ms)Writing region close event to WAL at 1731668474656Closed at 1731668474656 2024-11-15T11:01:14,660 WARN [master/4cdab6d79b20:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/data/master/store/.initializing 2024-11-15T11:01:14,660 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:14,670 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T11:01:14,687 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4cdab6d79b20%2C33465%2C1731668473053, suffix=, logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053, archiveDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/oldWALs, maxLogs=10 2024-11-15T11:01:14,714 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693, exclude list is [], retry=0 2024-11-15T11:01:14,717 WARN [IPC Server handler 4 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T11:01:14,718 WARN [IPC Server handler 4 on default port 37487 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T11:01:14,718 WARN [IPC Server handler 4 on default port 37487 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T11:01:14,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39395,DS-12ff82f3-963a-47be-a22e-83c8f228f778,DISK] 2024-11-15T11:01:14,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38361,DS-d2c3abd2-6c40-4063-abef-c89772545457,DISK] 2024-11-15T11:01:14,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-15T11:01:14,783 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 2024-11-15T11:01:14,785 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33185:33185),(127.0.0.1/127.0.0.1:36145:36145)] 2024-11-15T11:01:14,785 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:01:14,786 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:14,790 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,791 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T11:01:14,874 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:14,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:14,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T11:01:14,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:14,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:14,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T11:01:14,900 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:14,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:14,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T11:01:14,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:14,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:14,909 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,913 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,915 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,921 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,922 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,927 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-15T11:01:14,932 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T11:01:14,941 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:14,942 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72110635, jitterRate=0.07453219592571259}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:01:14,950 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731668474808Initializing all the Stores at 1731668474810 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668474811 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668474812 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668474813 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668474813Cleaning up temporary data from old regions at 1731668474922 (+109 ms)Region opened successfully at 1731668474950 (+28 ms) 2024-11-15T11:01:14,952 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T11:01:14,991 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338c489, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4cdab6d79b20/172.17.0.2:0 2024-11-15T11:01:15,028 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-15T11:01:15,042 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T11:01:15,042 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T11:01:15,046 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T11:01:15,047 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T11:01:15,052 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-15T11:01:15,052 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T11:01:15,085 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T11:01:15,097 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T11:01:15,100 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T11:01:15,103 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T11:01:15,107 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T11:01:15,109 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T11:01:15,112 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T11:01:15,116 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T11:01:15,118 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T11:01:15,120 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T11:01:15,122 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T11:01:15,145 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T11:01:15,152 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T11:01:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,165 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4cdab6d79b20,33465,1731668473053, sessionid=0x101ab9766630000, setting cluster-up flag (Was=false) 2024-11-15T11:01:15,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-15T11:01:15,193 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T11:01:15,196 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:15,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:15,227 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T11:01:15,229 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:15,239 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T11:01:15,293 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-15T11:01:15,298 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(746): ClusterId : 7eec1d4a-af98-41cc-ba5a-34a7b19499cc 2024-11-15T11:01:15,301 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(746): ClusterId : 7eec1d4a-af98-41cc-ba5a-34a7b19499cc 2024-11-15T11:01:15,301 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(746): ClusterId : 7eec1d4a-af98-41cc-ba5a-34a7b19499cc 2024-11-15T11:01:15,302 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:15,303 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-11-15T11:01:15,305 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:01:15,305 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:01:15,305 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T11:01:15,313 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:01:15,313 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:01:15,314 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:01:15,314 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:01:15,314 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T11:01:15,314 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T11:01:15,319 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:01:15,319 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:01:15,319 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T11:01:15,320 DEBUG [RS:0;4cdab6d79b20:45865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f68d007, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4cdab6d79b20/172.17.0.2:0 2024-11-15T11:01:15,320 DEBUG [RS:1;4cdab6d79b20:36113 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ae4409, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4cdab6d79b20/172.17.0.2:0 2024-11-15T11:01:15,323 DEBUG [RS:2;4cdab6d79b20:39207 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bfc4d46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4cdab6d79b20/172.17.0.2:0 2024-11-15T11:01:15,337 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4cdab6d79b20:45865 2024-11-15T11:01:15,342 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:01:15,342 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:01:15,343 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T11:01:15,343 INFO [RS:0;4cdab6d79b20:45865 {}] coprocessor.CoprocessorHost(174): System coprocessor 
org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:15,343 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T11:01:15,346 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=45865, startcode=1731668473964 2024-11-15T11:01:15,362 DEBUG [RS:0;4cdab6d79b20:45865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:01:15,371 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;4cdab6d79b20:39207 2024-11-15T11:01:15,372 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:01:15,372 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:01:15,372 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T11:01:15,372 INFO [RS:2;4cdab6d79b20:39207 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:15,372 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T11:01:15,375 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4cdab6d79b20:36113 2024-11-15T11:01:15,376 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T11:01:15,376 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T11:01:15,376 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T11:01:15,376 INFO [RS:1;4cdab6d79b20:36113 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:15,377 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T11:01:15,376 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T11:01:15,377 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=39207, startcode=1731668474152 2024-11-15T11:01:15,378 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=36113, startcode=1731668474097 2024-11-15T11:01:15,378 DEBUG [RS:1;4cdab6d79b20:36113 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:01:15,379 DEBUG [RS:2;4cdab6d79b20:39207 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T11:01:15,389 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T11:01:15,397 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T11:01:15,406 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4cdab6d79b20,33465,1731668473053 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T11:01:15,422 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4cdab6d79b20:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:01:15,422 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4cdab6d79b20:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:01:15,422 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4cdab6d79b20:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:01:15,423 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4cdab6d79b20:0, corePoolSize=5, maxPoolSize=5 2024-11-15T11:01:15,423 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4cdab6d79b20:0, corePoolSize=10, maxPoolSize=10 2024-11-15T11:01:15,423 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,423 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/4cdab6d79b20:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:01:15,423 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,433 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51009, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:01:15,433 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56871, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:01:15,434 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46423, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T11:01:15,437 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:01:15,438 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T11:01:15,438 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731668505438 2024-11-15T11:01:15,442 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T11:01:15,442 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T11:01:15,444 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T11:01:15,448 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T11:01:15,449 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T11:01:15,449 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T11:01:15,449 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T11:01:15,452 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T11:01:15,454 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:15,454 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T11:01:15,454 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:01:15,455 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,463 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T11:01:15,465 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T11:01:15,465 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T11:01:15,480 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T11:01:15,480 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T11:01:15,486 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T11:01:15,486 WARN [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-15T11:01:15,486 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T11:01:15,486 WARN [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-15T11:01:15,487 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T11:01:15,487 WARN [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-15T11:01:15,495 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.large.0-1731668475482,5,FailOnTimeoutGroup] 2024-11-15T11:01:15,504 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.small.0-1731668475495,5,FailOnTimeoutGroup] 2024-11-15T11:01:15,504 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,504 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T11:01:15,506 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,507 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:01:15,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:01:15,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741831_1007 (size=1321) 2024-11-15T11:01:15,554 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T11:01:15,554 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:15,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:01:15,588 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=36113, startcode=1731668474097 2024-11-15T11:01:15,589 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=45865, startcode=1731668473964 2024-11-15T11:01:15,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:01:15,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741832_1008 (size=32) 2024-11-15T11:01:15,591 INFO [RS:2;4cdab6d79b20:39207 {}] 
regionserver.HRegionServer(2659): reportForDuty to master=4cdab6d79b20,33465,1731668473053 with port=39207, startcode=1731668474152 2024-11-15T11:01:15,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:15,596 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(517): Registering regionserver=4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,612 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:15,612 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,612 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37487 2024-11-15T11:01:15,612 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:01:15,612 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(517): Registering regionserver=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:01:15,618 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:01:15,618 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:15,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:15,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:01:15,621 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:01:15,622 DEBUG [RS:0;4cdab6d79b20:45865 {}] zookeeper.ZKUtil(111): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,622 WARN [RS:0;4cdab6d79b20:45865 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:01:15,623 INFO [RS:0;4cdab6d79b20:45865 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T11:01:15,623 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,624 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:01:15,624 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465 {}] master.ServerManager(517): Registering regionserver=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,632 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:15,632 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:15,633 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37487 2024-11-15T11:01:15,633 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:01:15,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:15,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:01:15,641 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:01:15,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:15,641 DEBUG [RS:2;4cdab6d79b20:39207 {}] zookeeper.ZKUtil(111): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,641 WARN [RS:2;4cdab6d79b20:39207 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T11:01:15,642 INFO [RS:2;4cdab6d79b20:39207 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T11:01:15,642 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:15,642 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,642 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37487 2024-11-15T11:01:15,642 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T11:01:15,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:15,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:01:15,645 DEBUG [RS:1;4cdab6d79b20:36113 {}] zookeeper.ZKUtil(111): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,645 WARN [RS:1;4cdab6d79b20:36113 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T11:01:15,645 INFO [RS:1;4cdab6d79b20:36113 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T11:01:15,646 DEBUG [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:01:15,646 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:15,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:15,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:01:15,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740 2024-11-15T11:01:15,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740 2024-11-15T11:01:15,650 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4cdab6d79b20,36113,1731668474097] 2024-11-15T11:01:15,650 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4cdab6d79b20,39207,1731668474152] 2024-11-15T11:01:15,650 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4cdab6d79b20,45865,1731668473964] 2024-11-15T11:01:15,654 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:01:15,654 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:01:15,655 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-15T11:01:15,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:01:15,691 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:15,692 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62107150, jitterRate=-0.07453134655952454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:01:15,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731668475594Initializing all the Stores at 1731668475603 (+9 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668475604 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668475614 (+10 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668475614Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668475614Cleaning up temporary data from old regions at 1731668475654 (+40 ms)Region opened successfully at 1731668475695 (+41 ms) 2024-11-15T11:01:15,695 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:01:15,695 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:01:15,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:01:15,695 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T11:01:15,695 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:01:15,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:01:15,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after 
waiting 0 ms 2024-11-15T11:01:15,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:01:15,697 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:01:15,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731668475695Disabling compacts and flushes for region at 1731668475695Disabling writes for close at 1731668475696 (+1 ms)Writing region close event to WAL at 1731668475697 (+1 ms)Closed at 1731668475697 2024-11-15T11:01:15,700 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:01:15,700 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T11:01:15,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T11:01:15,717 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:01:15,717 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:01:15,717 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T11:01:15,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:01:15,722 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T11:01:15,726 INFO [RS:1;4cdab6d79b20:36113 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:01:15,726 INFO [RS:0;4cdab6d79b20:45865 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:01:15,726 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,726 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,727 INFO [RS:2;4cdab6d79b20:39207 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T11:01:15,727 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,729 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:01:15,729 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:01:15,729 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T11:01:15,735 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:01:15,735 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:01:15,735 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T11:01:15,737 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,737 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,737 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,737 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,737 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,738 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,738 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,738 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,738 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4cdab6d79b20:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:01:15,738 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4cdab6d79b20:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:01:15,738 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 
2024-11-15T11:01:15,740 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4cdab6d79b20:0, corePoolSize=2, maxPoolSize=2 2024-11-15T11:01:15,740 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,740 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:0;4cdab6d79b20:45865 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,741 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4cdab6d79b20:0, corePoolSize=1, maxPoolSize=1 2024-11-15T11:01:15,741 DEBUG [RS:0;4cdab6d79b20:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,741 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,741 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,741 DEBUG [RS:1;4cdab6d79b20:36113 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,741 DEBUG [RS:2;4cdab6d79b20:39207 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4cdab6d79b20:0, corePoolSize=3, maxPoolSize=3 2024-11-15T11:01:15,747 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,45865,1731668473964-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,748 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,748 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,749 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,749 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,36113,1731668474097-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:01:15,749 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,749 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,39207,1731668474152-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:01:15,772 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:01:15,775 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:01:15,775 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,36113,1731668474097-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,776 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,776 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.Replication(171): 4cdab6d79b20,36113,1731668474097 started 2024-11-15T11:01:15,780 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T11:01:15,780 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,45865,1731668473964-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,775 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,39207,1731668474152-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,781 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,781 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,781 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.Replication(171): 4cdab6d79b20,39207,1731668474152 started 2024-11-15T11:01:15,782 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.Replication(171): 4cdab6d79b20,45865,1731668473964 started 2024-11-15T11:01:15,812 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,812 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T11:01:15,812 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:15,813 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1482): Serving as 4cdab6d79b20,39207,1731668474152, RpcServer on 4cdab6d79b20/172.17.0.2:39207, sessionid=0x101ab9766630003 2024-11-15T11:01:15,813 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1482): Serving as 4cdab6d79b20,36113,1731668474097, RpcServer on 4cdab6d79b20/172.17.0.2:36113, sessionid=0x101ab9766630002 2024-11-15T11:01:15,813 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1482): Serving as 4cdab6d79b20,45865,1731668473964, RpcServer on 4cdab6d79b20/172.17.0.2:45865, sessionid=0x101ab9766630001 2024-11-15T11:01:15,814 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:01:15,814 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:01:15,814 DEBUG [RS:0;4cdab6d79b20:45865 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,814 DEBUG [RS:1;4cdab6d79b20:36113 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,815 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,45865,1731668473964' 2024-11-15T11:01:15,815 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,36113,1731668474097' 2024-11-15T11:01:15,815 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:01:15,815 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:01:15,815 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T11:01:15,815 DEBUG [RS:2;4cdab6d79b20:39207 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,815 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,39207,1731668474152' 2024-11-15T11:01:15,815 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T11:01:15,816 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:01:15,817 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:01:15,817 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:01:15,817 DEBUG [RS:2;4cdab6d79b20:39207 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:15,817 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,39207,1731668474152' 
2024-11-15T11:01:15,817 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:01:15,818 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:01:15,818 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:01:15,818 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:01:15,818 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:01:15,818 DEBUG [RS:1;4cdab6d79b20:36113 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:15,819 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,36113,1731668474097' 2024-11-15T11:01:15,819 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:01:15,819 DEBUG [RS:2;4cdab6d79b20:39207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:01:15,819 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:01:15,819 INFO [RS:2;4cdab6d79b20:39207 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:01:15,820 INFO [RS:2;4cdab6d79b20:39207 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:01:15,820 DEBUG [RS:1;4cdab6d79b20:36113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:01:15,820 INFO [RS:1;4cdab6d79b20:36113 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:01:15,820 INFO [RS:1;4cdab6d79b20:36113 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-15T11:01:15,825 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T11:01:15,826 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T11:01:15,826 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T11:01:15,827 DEBUG [RS:0;4cdab6d79b20:45865 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:15,827 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4cdab6d79b20,45865,1731668473964' 2024-11-15T11:01:15,827 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T11:01:15,827 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T11:01:15,829 DEBUG [RS:0;4cdab6d79b20:45865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T11:01:15,829 INFO [RS:0;4cdab6d79b20:45865 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T11:01:15,829 INFO [RS:0;4cdab6d79b20:45865 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T11:01:15,873 WARN [4cdab6d79b20:33465 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T11:01:15,925 INFO [RS:1;4cdab6d79b20:36113 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T11:01:15,925 INFO [RS:2;4cdab6d79b20:39207 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T11:01:15,928 INFO [RS:1;4cdab6d79b20:36113 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4cdab6d79b20%2C36113%2C1731668474097, suffix=, logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,36113,1731668474097, archiveDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs, maxLogs=32 2024-11-15T11:01:15,929 INFO [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4cdab6d79b20%2C39207%2C1731668474152, suffix=, logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152, archiveDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs, maxLogs=32 2024-11-15T11:01:15,930 INFO [RS:0;4cdab6d79b20:45865 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T11:01:15,933 INFO [RS:0;4cdab6d79b20:45865 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4cdab6d79b20%2C45865%2C1731668473964, suffix=, logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,45865,1731668473964, archiveDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs, maxLogs=32 2024-11-15T11:01:15,947 DEBUG [RS:2;4cdab6d79b20:39207 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152/4cdab6d79b20%2C39207%2C1731668474152.1731668475932, exclude list is [], retry=0 2024-11-15T11:01:15,947 DEBUG [RS:1;4cdab6d79b20:36113 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,36113,1731668474097/4cdab6d79b20%2C36113%2C1731668474097.1731668475932, exclude list is [], retry=0 2024-11-15T11:01:15,950 DEBUG [RS:0;4cdab6d79b20:45865 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,45865,1731668473964/4cdab6d79b20%2C45865%2C1731668473964.1731668475935, exclude list is [], retry=0 2024-11-15T11:01:15,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38361,DS-d2c3abd2-6c40-4063-abef-c89772545457,DISK] 2024-11-15T11:01:15,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42399,DS-c18c3b04-ca00-443a-942f-db0d3b7668f3,DISK] 2024-11-15T11:01:15,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39395,DS-12ff82f3-963a-47be-a22e-83c8f228f778,DISK] 2024-11-15T11:01:15,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38361,DS-d2c3abd2-6c40-4063-abef-c89772545457,DISK] 2024-11-15T11:01:15,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39395,DS-12ff82f3-963a-47be-a22e-83c8f228f778,DISK] 2024-11-15T11:01:15,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42399,DS-c18c3b04-ca00-443a-942f-db0d3b7668f3,DISK] 2024-11-15T11:01:15,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39395,DS-12ff82f3-963a-47be-a22e-83c8f228f778,DISK] 2024-11-15T11:01:15,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42399,DS-c18c3b04-ca00-443a-942f-db0d3b7668f3,DISK] 2024-11-15T11:01:15,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38361,DS-d2c3abd2-6c40-4063-abef-c89772545457,DISK] 2024-11-15T11:01:15,986 INFO [RS:1;4cdab6d79b20:36113 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,36113,1731668474097/4cdab6d79b20%2C36113%2C1731668474097.1731668475932 2024-11-15T11:01:15,986 INFO [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152/4cdab6d79b20%2C39207%2C1731668474152.1731668475932 2024-11-15T11:01:15,986 INFO [RS:0;4cdab6d79b20:45865 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,45865,1731668473964/4cdab6d79b20%2C45865%2C1731668473964.1731668475935 2024-11-15T11:01:15,986 DEBUG [RS:1;4cdab6d79b20:36113 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43239:43239),(127.0.0.1/127.0.0.1:36145:36145),(127.0.0.1/127.0.0.1:33185:33185)] 2024-11-15T11:01:15,986 DEBUG [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33185:33185),(127.0.0.1/127.0.0.1:43239:43239),(127.0.0.1/127.0.0.1:36145:36145)] 2024-11-15T11:01:15,987 DEBUG [RS:0;4cdab6d79b20:45865 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33185:33185),(127.0.0.1/127.0.0.1:43239:43239),(127.0.0.1/127.0.0.1:36145:36145)] 2024-11-15T11:01:16,126 DEBUG [4cdab6d79b20:33465 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-15T11:01:16,134 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:01:16,141 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:01:16,142 INFO [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:01:16,142 INFO [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:01:16,142 INFO [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:01:16,142 DEBUG [4cdab6d79b20:33465 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:01:16,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:16,162 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4cdab6d79b20,39207,1731668474152, state=OPENING 2024-11-15T11:01:16,169 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-11-15T11:01:16,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:16,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:16,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:16,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:16,173 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,173 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,177 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T11:01:16,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:01:16,364 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:01:16,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50533, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:01:16,383 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T11:01:16,384 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T11:01:16,385 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-15T11:01:16,389 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4cdab6d79b20%2C39207%2C1731668474152.meta, suffix=.meta, logDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152, 
archiveDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs, maxLogs=32 2024-11-15T11:01:16,413 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152/4cdab6d79b20%2C39207%2C1731668474152.meta.1731668476391.meta, exclude list is [], retry=0 2024-11-15T11:01:16,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38361,DS-d2c3abd2-6c40-4063-abef-c89772545457,DISK] 2024-11-15T11:01:16,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39395,DS-12ff82f3-963a-47be-a22e-83c8f228f778,DISK] 2024-11-15T11:01:16,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42399,DS-c18c3b04-ca00-443a-942f-db0d3b7668f3,DISK] 2024-11-15T11:01:16,428 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/WALs/4cdab6d79b20,39207,1731668474152/4cdab6d79b20%2C39207%2C1731668474152.meta.1731668476391.meta 2024-11-15T11:01:16,430 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33185:33185),(127.0.0.1/127.0.0.1:43239:43239),(127.0.0.1/127.0.0.1:36145:36145)] 2024-11-15T11:01:16,430 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:01:16,432 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-15T11:01:16,433 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:16,434 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T11:01:16,436 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T11:01:16,438 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T11:01:16,447 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T11:01:16,447 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:16,447 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T11:01:16,448 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T11:01:16,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T11:01:16,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T11:01:16,453 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:16,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:16,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T11:01:16,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T11:01:16,456 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:16,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:16,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T11:01:16,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T11:01:16,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:16,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T11:01:16,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T11:01:16,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T11:01:16,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:16,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T11:01:16,463 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T11:01:16,464 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740 2024-11-15T11:01:16,467 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740 2024-11-15T11:01:16,470 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T11:01:16,470 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T11:01:16,471 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T11:01:16,475 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T11:01:16,477 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75172957, jitterRate=0.12016434967517853}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T11:01:16,477 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T11:01:16,481 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731668476448Writing region info on filesystem at 1731668476448Initializing all the Stores at 1731668476450 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668476450Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668476451 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668476451Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668476451Cleaning up temporary data from old regions at 1731668476470 (+19 ms)Running coprocessor post-open hooks at 1731668476477 (+7 ms)Region opened successfully at 1731668476481 (+4 ms) 2024-11-15T11:01:16,491 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731668476351 2024-11-15T11:01:16,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:16,514 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4cdab6d79b20,39207,1731668474152, state=OPEN 2024-11-15T11:01:16,515 DEBUG [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T11:01:16,515 INFO [RS_OPEN_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T11:01:16,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:01:16,518 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:01:16,518 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:01:16,518 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T11:01:16,520 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T11:01:16,520 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:16,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T11:01:16,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4cdab6d79b20,39207,1731668474152 in 342 msec 2024-11-15T11:01:16,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T11:01:16,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 826 msec 2024-11-15T11:01:16,543 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T11:01:16,543 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T11:01:16,567 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:16,569 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:16,594 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:16,597 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49613, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:16,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3130 sec 2024-11-15T11:01:16,625 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731668476624, completionTime=-1 2024-11-15T11:01:16,629 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-15T11:01:16,629 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-15T11:01:16,669 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-15T11:01:16,669 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731668536669 2024-11-15T11:01:16,669 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731668596669 2024-11-15T11:01:16,669 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 40 msec 2024-11-15T11:01:16,671 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:01:16,681 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,681 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,682 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,684 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4cdab6d79b20:33465, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,694 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T11:01:16,695 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,698 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:16,740 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.469sec 2024-11-15T11:01:16,754 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T11:01:16,755 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T11:01:16,757 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T11:01:16,758 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-15T11:01:16,758 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T11:01:16,759 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T11:01:16,760 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T11:01:16,797 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:01:16,798 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:16,801 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3c154550 2024-11-15T11:01:16,803 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:01:16,808 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40379, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:01:16,826 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T11:01:16,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-15T11:01:16,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@518b8158, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:16,888 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:01:16,889 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T11:01:16,889 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T11:01:16,890 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:16,891 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-15T11:01:16,894 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 
4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:16,894 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:01:16,897 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:16,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:01:16,908 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:16,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:16,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:16,914 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74c6ee9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:16,914 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:16,918 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:16,931 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59560, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:16,935 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:16,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569082ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:16,936 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:16,945 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:16,946 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:16,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:16,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/test.cache.data in system properties and HBase conf 2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir in system properties and HBase conf 2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T11:01:16,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:01:16,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/nfs.dump.dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T11:01:16,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T11:01:16,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T11:01:17,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741837_1013 (size=349) 2024-11-15T11:01:17,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741837_1013 (size=349) 2024-11-15T11:01:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741837_1013 (size=349) 2024-11-15T11:01:17,018 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28dfeddf0af1b0d2554866c3d938dd83, NAME => 'hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:17,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:01:17,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741838_1014 (size=36) 2024-11-15T11:01:17,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741838_1014 (size=36) 2024-11-15T11:01:17,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741838_1014 (size=36) 2024-11-15T11:01:17,078 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:17,078 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 28dfeddf0af1b0d2554866c3d938dd83, disabling compactions & flushes 2024-11-15T11:01:17,078 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,078 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,078 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. after waiting 0 ms 2024-11-15T11:01:17,078 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,079 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 
2024-11-15T11:01:17,079 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28dfeddf0af1b0d2554866c3d938dd83: Waiting for close lock at 1731668477078Disabling compacts and flushes for region at 1731668477078Disabling writes for close at 1731668477078Writing region close event to WAL at 1731668477078Closed at 1731668477078 2024-11-15T11:01:17,082 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:01:17,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741839_1015 (size=592039) 2024-11-15T11:01:17,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741839_1015 (size=592039) 2024-11-15T11:01:17,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741839_1015 (size=592039) 2024-11-15T11:01:17,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1731668477084"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668477084"}]},"ts":"1731668477084"} 2024-11-15T11:01:17,107 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-15T11:01:17,114 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:01:17,119 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668477114"}]},"ts":"1731668477114"} 2024-11-15T11:01:17,137 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-15T11:01:17,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:01:17,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:01:17,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:01:17,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:01:17,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:01:17,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=28dfeddf0af1b0d2554866c3d938dd83, ASSIGN}] 2024-11-15T11:01:17,148 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=28dfeddf0af1b0d2554866c3d938dd83, ASSIGN 2024-11-15T11:01:17,152 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=28dfeddf0af1b0d2554866c3d938dd83, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:01:17,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T11:01:17,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T11:01:17,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T11:01:17,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:01:17,305 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-15T11:01:17,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=28dfeddf0af1b0d2554866c3d938dd83, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:17,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=28dfeddf0af1b0d2554866c3d938dd83, ASSIGN because future has completed 2024-11-15T11:01:17,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28dfeddf0af1b0d2554866c3d938dd83, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:01:17,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:01:17,551 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,551 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 28dfeddf0af1b0d2554866c3d938dd83, NAME => 'hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:01:17,552 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 
service=AccessControlService 2024-11-15T11:01:17,552 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:17,553 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,553 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:17,553 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,553 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,561 INFO [StoreOpener-28dfeddf0af1b0d2554866c3d938dd83-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,565 INFO [StoreOpener-28dfeddf0af1b0d2554866c3d938dd83-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28dfeddf0af1b0d2554866c3d938dd83 columnFamilyName l 2024-11-15T11:01:17,565 DEBUG [StoreOpener-28dfeddf0af1b0d2554866c3d938dd83-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:17,571 INFO [StoreOpener-28dfeddf0af1b0d2554866c3d938dd83-1 {}] regionserver.HStore(327): Store=28dfeddf0af1b0d2554866c3d938dd83/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:17,580 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,583 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,586 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,589 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,589 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,596 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,607 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:17,608 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 28dfeddf0af1b0d2554866c3d938dd83; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67039526, jitterRate=-0.0010332167148590088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:17,609 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:01:17,612 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 28dfeddf0af1b0d2554866c3d938dd83: Running coprocessor pre-open hook at 1731668477553Writing region info on filesystem at 1731668477554 (+1 ms)Initializing all the Stores at 1731668477557 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731668477557Cleaning up temporary data from old regions at 1731668477590 (+33 ms)Running coprocessor post-open hooks at 1731668477609 (+19 ms)Region opened successfully at 1731668477612 (+3 ms) 2024-11-15T11:01:17,617 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., pid=6, masterSystemTime=1731668477503 2024-11-15T11:01:17,624 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=28dfeddf0af1b0d2554866c3d938dd83, regionState=OPEN, openSeqNum=2, 
regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:17,625 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,625 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:01:17,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28dfeddf0af1b0d2554866c3d938dd83, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:01:17,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T11:01:17,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 28dfeddf0af1b0d2554866c3d938dd83, server=4cdab6d79b20,39207,1731668474152 in 326 msec 2024-11-15T11:01:17,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T11:01:17,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=28dfeddf0af1b0d2554866c3d938dd83, ASSIGN in 505 msec 2024-11-15T11:01:17,658 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:01:17,658 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668477658"}]},"ts":"1731668477658"} 2024-11-15T11:01:17,663 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-15T11:01:17,666 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:01:17,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 836 msec 2024-11-15T11:01:17,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741828_1004 (size=1189) 2024-11-15T11:01:17,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741829_1005 (size=34) 2024-11-15T11:01:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T11:01:18,057 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-15T11:01:18,080 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T11:01:18,082 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] 
hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T11:01:18,082 INFO [master/4cdab6d79b20:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4cdab6d79b20,33465,1731668473053-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T11:01:19,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:19,034 WARN [Thread-381 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:19,403 INFO [Thread-381 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:01:19,403 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T11:01:19,404 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:01:19,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:01:19,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:01:19,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:01:19,432 INFO [Thread-381 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:01:19,432 INFO [Thread-381 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:01:19,432 INFO [Thread-381 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T11:01:19,438 INFO [Thread-381 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d74461e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:01:19,439 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:19,439 INFO [Thread-381 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15d24492{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T11:01:19,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c6effca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:01:19,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2096f1fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T11:01:19,613 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-15T11:01:19,613 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-15T11:01:19,613 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T11:01:19,616 INFO [Thread-381 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T11:01:19,680 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:20,153 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:20,711 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:20,738 INFO [Thread-381 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dc7160a{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-43295-hadoop-yarn-common-3_4_1_jar-_-any-4071427352442543461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-15T11:01:20,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@68d6c8c{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-43763-hadoop-yarn-common-3_4_1_jar-_-any-5712975023110815490/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-15T11:01:20,739 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2365d1eb{HTTP/1.1, (http/1.1)}{localhost:43763} 2024-11-15T11:01:20,739 INFO [Time-limited test {}] server.Server(415): Started @15243ms 2024-11-15T11:01:20,742 INFO [Thread-381 {}] server.AbstractConnector(333): Started ServerConnector@7d7cc728{HTTP/1.1, (http/1.1)}{localhost:43295} 2024-11-15T11:01:20,742 INFO [Thread-381 {}] server.Server(415): Started @15246ms 2024-11-15T11:01:20,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741841_1017 (size=5) 2024-11-15T11:01:20,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741841_1017 (size=5) 2024-11-15T11:01:20,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741841_1017 (size=5) 2024-11-15T11:01:21,751 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-15T11:01:21,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:21,828 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T11:01:21,829 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:01:21,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:01:21,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:01:21,855 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:01:21,856 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:21,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e117a92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:01:21,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70de798d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T11:01:21,929 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-15T11:01:21,929 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T11:01:21,929 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-15T11:01:21,929 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T11:01:21,952 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:21,967 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:01:21,984 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:22,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T11:01:22,106 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-15T11:01:22,159 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:22,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@487b2ed9{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-45637-hadoop-yarn-common-3_4_1_jar-_-any-4617370303166858626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T11:01:22,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a545a45{HTTP/1.1, (http/1.1)}{localhost:45637} 2024-11-15T11:01:22,174 INFO [Time-limited test {}] server.Server(415): Started @16678ms 2024-11-15T11:01:22,383 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation 
is disabled.So is the LogAggregationStatusTracker. 2024-11-15T11:01:22,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:22,399 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T11:01:22,399 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T11:01:22,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T11:01:22,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T11:01:22,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T11:01:22,412 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T11:01:22,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7821ccae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,AVAILABLE} 2024-11-15T11:01:22,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40f9fdc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T11:01:22,480 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-15T11:01:22,481 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T11:01:22,481 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-15T11:01:22,481 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T11:01:22,491 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:22,497 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T11:01:22,646 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope 
"Singleton" 2024-11-15T11:01:22,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bfaa534{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/java.io.tmpdir/jetty-localhost-41609-hadoop-yarn-common-3_4_1_jar-_-any-1195100586475592535/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T11:01:22,655 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3daaeea9{HTTP/1.1, (http/1.1)}{localhost:41609} 2024-11-15T11:01:22,656 INFO [Time-limited test {}] server.Server(415): Started @17160ms 2024-11-15T11:01:22,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-15T11:01:22,693 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:22,724 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=720, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=397, ProcessCount=11, AvailableMemoryMB=4641 2024-11-15T11:01:22,730 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=720 is superior to 500 2024-11-15T11:01:22,736 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T11:01:22,742 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 4cdab6d79b20,33465,1731668473053 2024-11-15T11:01:22,742 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ca0936f 2024-11-15T11:01:22,743 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T11:01:22,746 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T11:01:22,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:01:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:22,758 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:01:22,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: 
"default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-15T11:01:22,760 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:01:22,762 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:01:22,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741842_1018 (size=406) 2024-11-15T11:01:22,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741842_1018 (size=406) 2024-11-15T11:01:22,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741842_1018 (size=406) 2024-11-15T11:01:22,785 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4ddca89ec1344d996622b72ea235ecfc, NAME => 'testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:22,786 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e45f0f459cfa1b2a327d167c20ebf16b, NAME => 'testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741843_1019 (size=67) 2024-11-15T11:01:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741843_1019 (size=67) 2024-11-15T11:01:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741843_1019 (size=67) 2024-11-15T11:01:22,821 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:22,821 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 4ddca89ec1344d996622b72ea235ecfc, disabling compactions & flushes 2024-11-15T11:01:22,821 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:22,821 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:22,821 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. after waiting 0 ms 2024-11-15T11:01:22,821 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:22,821 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:22,821 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4ddca89ec1344d996622b72ea235ecfc: Waiting for close lock at 1731668482821Disabling compacts and flushes for region at 1731668482821Disabling writes for close at 1731668482821Writing region close event to WAL at 1731668482821Closed at 1731668482821 2024-11-15T11:01:22,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741844_1020 (size=67) 2024-11-15T11:01:22,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741844_1020 (size=67) 2024-11-15T11:01:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741844_1020 (size=67) 2024-11-15T11:01:22,838 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:22,838 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing e45f0f459cfa1b2a327d167c20ebf16b, disabling compactions & flushes 2024-11-15T11:01:22,838 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:22,838 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 
2024-11-15T11:01:22,838 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. after waiting 0 ms 2024-11-15T11:01:22,838 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:22,838 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:22,839 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for e45f0f459cfa1b2a327d167c20ebf16b: Waiting for close lock at 1731668482838Disabling compacts and flushes for region at 1731668482838Disabling writes for close at 1731668482838Writing region close event to WAL at 1731668482838Closed at 1731668482838 2024-11-15T11:01:22,843 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:01:22,843 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731668482843"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668482843"}]},"ts":"1731668482843"} 2024-11-15T11:01:22,843 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731668482843"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668482843"}]},"ts":"1731668482843"} 2024-11-15T11:01:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:01:22,884 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
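The create request recorded at 11:01:22,748 above (table testtb-testExportWithTargetName, a single 'cf' family with VERSIONS=1, BLOOMFILTER=ROW, 64 KB blocks, REGION_REPLICATION=1, and a split at row key '1' yielding the two regions just added to meta) is what an ordinary HBase Admin#createTable call produces. A minimal client-side sketch of an equivalent request follows; the class name and the Configuration/connection setup are illustrative assumptions, not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable { // illustrative class name, not from the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      // One 'cf' family matching the attributes echoed by the master:
      // VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, no compression.
      ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024);
      TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)
          .setColumnFamily(cf.build());
      // A single split key of '1' produces the two regions ('' -> '1', '1' -> '') added to meta above.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc.build(), splitKeys);
    }
  }
}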
2024-11-15T11:01:22,886 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:01:22,886 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668482886"}]},"ts":"1731668482886"} 2024-11-15T11:01:22,890 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-15T11:01:22,890 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:01:22,892 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:01:22,892 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:01:22,892 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:01:22,893 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:01:22,893 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:01:22,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, ASSIGN}] 2024-11-15T11:01:22,896 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, ASSIGN 2024-11-15T11:01:22,896 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, ASSIGN 2024-11-15T11:01:22,899 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:01:22,899 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:01:23,049 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-15T11:01:23,050 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e45f0f459cfa1b2a327d167c20ebf16b, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:23,050 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4ddca89ec1344d996622b72ea235ecfc, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:23,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, ASSIGN because future has completed 2024-11-15T11:01:23,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:01:23,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, ASSIGN because future has completed 2024-11-15T11:01:23,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:01:23,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:01:23,210 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:01:23,229 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:23,229 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => e45f0f459cfa1b2a327d167c20ebf16b, NAME => 'testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:01:23,230 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. service=AccessControlService 2024-11-15T11:01:23,230 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
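The AccessControlService registration and AccessController load reported on each region open come from cluster configuration rather than from anything in the table descriptor. A hedged sketch of the configuration keys a secure mini-cluster typically sets for this follows; the values shown are assumptions, since this run's actual configuration is not part of the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings that cause org.apache.hadoop.hbase.security.access.AccessController
// to be loaded on the master, the regionservers and every region, as the entries above show.
// The exact values used by this test run are not visible in the log.
public class SecureClusterConf { // illustrative class name
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}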
2024-11-15T11:01:23,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:23,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,237 INFO [StoreOpener-e45f0f459cfa1b2a327d167c20ebf16b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37061, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:01:23,241 INFO [StoreOpener-e45f0f459cfa1b2a327d167c20ebf16b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e45f0f459cfa1b2a327d167c20ebf16b columnFamilyName cf 2024-11-15T11:01:23,241 DEBUG [StoreOpener-e45f0f459cfa1b2a327d167c20ebf16b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:23,242 INFO [StoreOpener-e45f0f459cfa1b2a327d167c20ebf16b-1 {}] regionserver.HStore(327): Store=e45f0f459cfa1b2a327d167c20ebf16b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:23,242 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,244 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,245 DEBUG 
[RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,246 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:23,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 4ddca89ec1344d996622b72ea235ecfc, NAME => 'testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:01:23,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. service=AccessControlService 2024-11-15T11:01:23,247 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:23,248 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,248 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:23,248 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,248 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,249 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,249 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,253 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,255 INFO [StoreOpener-4ddca89ec1344d996622b72ea235ecfc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,257 INFO [StoreOpener-4ddca89ec1344d996622b72ea235ecfc-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ddca89ec1344d996622b72ea235ecfc columnFamilyName cf 2024-11-15T11:01:23,258 DEBUG [StoreOpener-4ddca89ec1344d996622b72ea235ecfc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:23,259 INFO [StoreOpener-4ddca89ec1344d996622b72ea235ecfc-1 {}] regionserver.HStore(327): Store=4ddca89ec1344d996622b72ea235ecfc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:23,260 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:23,260 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,260 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened e45f0f459cfa1b2a327d167c20ebf16b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75108365, jitterRate=0.11920185387134552}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:23,261 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:23,262 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for e45f0f459cfa1b2a327d167c20ebf16b: Running coprocessor pre-open hook at 1731668483232Writing region info on filesystem at 1731668483232Initializing all the Stores at 1731668483235 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668483235Cleaning up temporary data from old regions at 1731668483249 (+14 ms)Running coprocessor post-open hooks at 1731668483261 (+12 ms)Region opened successfully at 1731668483262 (+1 ms) 2024-11-15T11:01:23,262 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,263 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,264 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,264 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,265 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., pid=11, masterSystemTime=1731668483214 2024-11-15T11:01:23,267 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,273 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:23,273 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 
2024-11-15T11:01:23,274 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e45f0f459cfa1b2a327d167c20ebf16b, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:23,276 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:23,277 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 4ddca89ec1344d996622b72ea235ecfc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73008830, jitterRate=0.08791634440422058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:23,277 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:23,278 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 4ddca89ec1344d996622b72ea235ecfc: Running coprocessor pre-open hook at 1731668483249Writing region info on filesystem at 1731668483249Initializing all the Stores at 1731668483252 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668483252Cleaning up temporary data from old regions at 1731668483264 (+12 ms)Running coprocessor post-open hooks at 1731668483277 (+13 ms)Region opened successfully at 1731668483278 (+1 ms) 2024-11-15T11:01:23,280 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc., pid=10, masterSystemTime=1731668483210 2024-11-15T11:01:23,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:01:23,296 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:23,296 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 
2024-11-15T11:01:23,297 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4ddca89ec1344d996622b72ea235ecfc, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:23,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-15T11:01:23,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152 in 238 msec 2024-11-15T11:01:23,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:01:23,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, ASSIGN in 411 msec 2024-11-15T11:01:23,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-15T11:01:23,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097 in 256 msec 2024-11-15T11:01:23,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T11:01:23,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, ASSIGN in 426 msec 2024-11-15T11:01:23,326 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:01:23,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668483326"}]},"ts":"1731668483326"} 2024-11-15T11:01:23,331 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-15T11:01:23,333 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:01:23,339 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-15T11:01:23,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:23,363 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 
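Both OpenRegionProcedures and their parent ASSIGN transitions have now finished, and CreateTableProcedure (pid=7) goes on to mark testtb-testExportWithTargetName ENABLED in hbase:meta and to persist the creator's RWXCA permission. For orientation only, a minimal client-side sketch of the equivalent table creation — one 'cf' family, pre-split at row key '1' so the table starts with the two regions opened above; the class name, configuration handling, and variable names are illustrative assumptions, not the test's actual helper code:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePresplitTable {                      // illustrative name, not from the test
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
                // One column family 'cf'; pre-splitting at '1' yields the two regions seen above:
                // ('' -> '1') = 4ddca89ec1344d996622b72ea235ecfc and ('1' -> '') = e45f0f459cfa1b2a327d167c20ebf16b
                admin.createTable(
                    TableDescriptorBuilder.newBuilder(tn)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                        .build(),
                    new byte[][] { Bytes.toBytes("1") });
            }
        }
    }

With AccessController loaded (see the coprocessor messages above), the creating user receives full table permissions automatically, which is what the "Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA" entry records; no explicit grant call is needed.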
2024-11-15T11:01:23,363 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:23,364 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:23,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58225, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-15T11:01:23,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:23,376 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T11:01:23,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:23,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:01:23,410 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:23,410 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:23,410 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:23,410 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:23,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 659 msec 2024-11-15T11:01:23,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:01:23,588 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-15T11:01:23,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T11:01:23,588 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:23,589 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:01:23,589 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-15T11:01:23,590 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:01:23,590 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:23,591 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-15T11:01:23,591 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:23,591 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:01:23,591 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-15T11:01:23,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T11:01:23,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-15T11:01:23,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:01:23,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T11:01:23,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-15T11:01:23,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T11:01:23,896 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T11:01:23,896 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-15T11:01:23,897 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:23,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-15T11:01:23,904 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:23,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 
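The CREATE operation has been acknowledged to the client and HBaseTestingUtil has confirmed both regions are assigned. The entries that follow show the master validating and executing a FLUSH-type snapshot named emptySnaptb0-testExportWithTargetName. A minimal sketch of how such a snapshot is requested through the public Admin API, reusing the assumed conn/admin handles from the previous sketch (only the snapshot and table names are taken from the log):

    // FLUSH-type snapshot of the still-empty table, as requested below at 11:01:23,922.
    // Admin.snapshot(...) is synchronous: it returns once SnapshotProcedure pid=12 (below) completes.
    admin.snapshot("emptySnaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"));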
2024-11-15T11:01:23,908 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T11:01:23,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T11:01:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668483922 (current time:1731668483922). 2024-11-15T11:01:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:01:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-15T11:01:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:01:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d50d4b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:23,927 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:23,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:23,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:23,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598069ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:23,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:23,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,930 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:47868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:23,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d2e7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:23,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:23,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:23,935 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:23,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,945 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:01:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f3c692d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:23,949 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:23,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:23,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11cea010, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,951 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:23,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@86f906b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:23,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:23,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:23,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:23,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:01:23,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:23,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:23,963 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T11:01:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:01:23,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T11:01:23,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-15T11:01:23,980 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:01:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T11:01:23,987 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:01:24,008 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:01:24,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741845_1021 (size=167) 2024-11-15T11:01:24,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741845_1021 (size=167) 2024-11-15T11:01:24,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741845_1021 (size=167) 2024-11-15T11:01:24,036 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:01:24,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b}] 2024-11-15T11:01:24,044 
INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:24,044 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:24,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T11:01:24,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-15T11:01:24,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-15T11:01:24,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:24,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:24,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for e45f0f459cfa1b2a327d167c20ebf16b: 2024-11-15T11:01:24,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. for emptySnaptb0-testExportWithTargetName completed. 2024-11-15T11:01:24,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:24,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 4ddca89ec1344d996622b72ea235ecfc: 2024-11-15T11:01:24,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. for emptySnaptb0-testExportWithTargetName completed. 2024-11-15T11:01:24,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:24,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:24,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:24,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:01:24,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:01:24,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741846_1022 (size=70) 2024-11-15T11:01:24,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741846_1022 (size=70) 2024-11-15T11:01:24,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741846_1022 (size=70) 2024-11-15T11:01:24,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:24,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-15T11:01:24,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741847_1023 (size=70) 2024-11-15T11:01:24,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-15T11:01:24,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:24,252 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:24,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741847_1023 (size=70) 2024-11-15T11:01:24,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741847_1023 (size=70) 2024-11-15T11:01:24,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 
2024-11-15T11:01:24,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T11:01:24,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T11:01:24,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:24,259 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:24,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc in 217 msec 2024-11-15T11:01:24,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-15T11:01:24,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b in 222 msec 2024-11-15T11:01:24,266 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:01:24,269 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:01:24,273 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:01:24,273 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:24,276 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T11:01:24,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741848_1024 (size=549) 2024-11-15T11:01:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741848_1024 (size=549) 2024-11-15T11:01:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741848_1024 (size=549) 
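The 549-byte block stored above is the consolidated single-file snapshot manifest; the next entries verify it and move the snapshot directory out of .hbase-snapshot/.tmp. From the client side, completion can be confirmed by listing snapshots — a small illustrative fragment, again reusing the assumed admin handle (SnapshotDescription here is org.apache.hadoop.hbase.client.SnapshotDescription):

    // The listing should now include emptySnaptb0-testExportWithTargetName for testtb-testExportWithTargetName.
    for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " " + sd.getTableName());
    }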
2024-11-15T11:01:24,320 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:01:24,336 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:01:24,338 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:24,342 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:01:24,342 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-15T11:01:24,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 371 msec 2024-11-15T11:01:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T11:01:24,607 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T11:01:24,622 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='021b2c365debd53f51ba1adbca93f3885', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:24,625 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='10d3eabbe53fd4a62d30160f20dcdb6a9', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,637 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2cd837a599f95747bb581818dd33add1f', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,639 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='322055c37af6e8dea656f7d7feb9e91eb', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,640 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:24,640 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='453840675f0320ac0990cca25de16d93e', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,642 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5d6a4214343e61a7f7be1702099060754', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,643 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='57a4f301d0057dca0193fd4848746f0b', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,646 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='6eb35b029798c4900b30c4457547e9046', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:01:24,673 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:24,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:01:24,678 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T11:01:24,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-15T11:01:24,685 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 
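The empty snapshot is complete and the test now loads rows into both regions; the "writing data to region ... with WAL disabled" warnings are what HRegion logs when mutations arrive with SKIP_WAL durability. A short sketch of that load plus the second FLUSH snapshot requested just below (snaptb0-testExportWithTargetName); the row keys, values, and count are illustrative — the test's actual keys are the randomized ones visible in the locator entries above, and only the table/snapshot names, the 'cf' family, and the SKIP_WAL durability come from the log (Table, Put, and Durability are from org.apache.hadoop.hbase.client):

    Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"));
    try {
        for (int i = 0; i < 50; i++) {                    // illustrative row count
            Put p = new Put(Bytes.toBytes("row-" + i));   // illustrative keys
            p.setDurability(Durability.SKIP_WAL);         // triggers the "WAL disabled" warning above
            p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            table.put(p);
        }
    } finally {
        table.close();
    }
    // Second FLUSH snapshot, requested below at 11:01:24,731; this one captures the rows just written.
    admin.snapshot("snaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"));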
2024-11-15T11:01:24,686 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:24,689 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T11:01:24,707 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T11:01:24,723 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T11:01:24,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T11:01:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668484731 (current time:1731668484731). 2024-11-15T11:01:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:01:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-15T11:01:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:01:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@440422d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:24,735 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:24,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:24,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:24,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35b93a07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-15T11:01:24,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:24,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:24,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,738 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:24,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a8f020d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:24,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:24,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:24,744 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:24,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
2024-11-15T11:01:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,746 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@620ec52f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:24,750 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:24,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:24,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:24,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75739dcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:24,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:24,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:24,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,752 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:24,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586cb04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:24,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:24,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:24,759 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:24,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:24,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
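The short-lived connections logged above repeat the same bootstrap steps: fetch the cluster id from the connection registry, then fetch the hbase:meta region location. A small probe using only public client API can surface the same two values; the class and variable names below are illustrative, not from the test.

```java
// Hedged sketch: public-API analogue of the ClusterIdFetcher / ConnectionUtils
// bootstrap steps logged above. Assumes hbase-site.xml on the classpath.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ConnectionBootstrapProbe {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Cluster id, the value the ClusterIdFetcher lines obtain from the registry.
      System.out.println("clusterId=" + admin.getClusterMetrics().getClusterId());
      // hbase:meta location, the value logged as "The fetched meta region location is [...]".
      try (RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
        HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
        System.out.println("hbase:meta on " + loc.getServerName());
      }
    }
  }
}
```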
2024-11-15T11:01:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:24,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T11:01:24,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
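Just above, the master reads the table's ACL entry ([jenkins: RWXCA]) and SnapshotManager confirms "No existing snapshot, attempting snapshot...", i.e. a duplicate-name check before the procedure is created. A client can do an equivalent look-before-you-leap check with listSnapshots(); this sketch is illustrative and not part of the test.

```java
// Hedged sketch: client-side check for an existing snapshot with the same name,
// mirroring the master-side "No existing snapshot" check logged above.
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CheckSnapshotExists {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      boolean exists = admin.listSnapshots(Pattern.compile("snaptb0-testExportWithTargetName"))
          .stream()
          .anyMatch(d -> d.getName().equals("snaptb0-testExportWithTargetName"));
      System.out.println("snapshot already present? " + exists);
    }
  }
}
```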
2024-11-15T11:01:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T11:01:24,777 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:01:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-15T11:01:24,780 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:01:24,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T11:01:24,787 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:01:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741849_1025 (size=162) 2024-11-15T11:01:24,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741849_1025 (size=162) 2024-11-15T11:01:24,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741849_1025 (size=162) 2024-11-15T11:01:24,811 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:01:24,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b}] 2024-11-15T11:01:24,815 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:24,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:24,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T11:01:24,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-15T11:01:24,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-15T11:01:24,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:24,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:24,974 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing e45f0f459cfa1b2a327d167c20ebf16b 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T11:01:24,974 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 4ddca89ec1344d996622b72ea235ecfc 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T11:01:25,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/.tmp/cf/a9d6a4e1ed33456484d74c2d430d1d4e is 71, key is 14fd85f2b0994830c97710542fe42d1e/cf:q/1731668484660/Put/seqid=0 2024-11-15T11:01:25,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/.tmp/cf/e859d7fca6c54750ae88f22155ec6ed1 is 71, key is 038d5eb9c631241b15e012d95c2da367/cf:q/1731668484674/Put/seqid=0 2024-11-15T11:01:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T11:01:25,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741851_1027 (size=5356) 2024-11-15T11:01:25,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741851_1027 (size=5356) 2024-11-15T11:01:25,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741851_1027 (size=5356) 2024-11-15T11:01:25,105 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/.tmp/cf/e859d7fca6c54750ae88f22155ec6ed1 2024-11-15T11:01:25,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741850_1026 (size=8258) 2024-11-15T11:01:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741850_1026 (size=8258) 2024-11-15T11:01:25,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741850_1026 (size=8258) 2024-11-15T11:01:25,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/.tmp/cf/a9d6a4e1ed33456484d74c2d430d1d4e 2024-11-15T11:01:25,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/.tmp/cf/e859d7fca6c54750ae88f22155ec6ed1 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1 2024-11-15T11:01:25,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/.tmp/cf/a9d6a4e1ed33456484d74c2d430d1d4e as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e 2024-11-15T11:01:25,199 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T11:01:25,200 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T11:01:25,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 4ddca89ec1344d996622b72ea235ecfc in 234ms, sequenceid=6, compaction requested=false 2024-11-15T11:01:25,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for e45f0f459cfa1b2a327d167c20ebf16b in 235ms, sequenceid=6, compaction requested=false 2024-11-15T11:01:25,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-15T11:01:25,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-15T11:01:25,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for e45f0f459cfa1b2a327d167c20ebf16b: 2024-11-15T11:01:25,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 4ddca89ec1344d996622b72ea235ecfc: 2024-11-15T11:01:25,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. for snaptb0-testExportWithTargetName completed. 2024-11-15T11:01:25,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. for snaptb0-testExportWithTargetName completed. 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e] hfiles 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:25,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1] hfiles 2024-11-15T11:01:25,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1 for snapshot=snaptb0-testExportWithTargetName 2024-11-15T11:01:25,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e for snapshot=snaptb0-testExportWithTargetName 2024-11-15T11:01:25,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741853_1029 (size=109) 2024-11-15T11:01:25,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741853_1029 (size=109) 2024-11-15T11:01:25,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741853_1029 (size=109) 2024-11-15T11:01:25,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 
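The section above records the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS phase: each region flushes its memstore to a new HFile (DefaultStoreFlusher), and the SnapshotManifest then stores the region-info and a reference to that HFile. For a FLUSH-type snapshot this happens automatically; the sketch below shows the explicit client-side equivalent of the per-region flush, purely for illustration.

```java
// Hedged sketch: explicit whole-table flush, the client-visible counterpart of the
// per-region memstore flushes logged above before the manifest references the HFiles.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Writes each region's memstore out as a new HFile under <region>/cf/,
      // the same files the SnapshotManifest later references.
      admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}
```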
2024-11-15T11:01:25,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-15T11:01:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-15T11:01:25,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:25,303 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:25,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4ddca89ec1344d996622b72ea235ecfc in 494 msec 2024-11-15T11:01:25,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741852_1028 (size=109) 2024-11-15T11:01:25,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741852_1028 (size=109) 2024-11-15T11:01:25,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741852_1028 (size=109) 2024-11-15T11:01:25,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 
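The repeated "Checking to see if procedure is done pid=15" lines are the client polling the master while the SnapshotProcedure and its SnapshotRegionProcedure children run. The blocking Admin.snapshot() call does this polling internally; when the request is submitted asynchronously, a caller can poll explicitly as sketched below (illustrative only, assumes the snapshot request has already been submitted).

```java
// Hedged sketch: poll for snapshot completion, the public counterpart of the
// "Checking to see if procedure is done pid=15" lines in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class WaitForSnapshot {
  public static void main(String[] args) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200); // poll interval is an arbitrary choice for this sketch
      }
      System.out.println("snapshot procedure finished");
    }
  }
}
```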
2024-11-15T11:01:25,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-15T11:01:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-15T11:01:25,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:25,321 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:25,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-15T11:01:25,328 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:01:25,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b in 512 msec 2024-11-15T11:01:25,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:01:25,331 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:01:25,331 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-15T11:01:25,333 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-15T11:01:25,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741854_1030 (size=627) 2024-11-15T11:01:25,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741854_1030 (size=627) 2024-11-15T11:01:25,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741854_1030 (size=627) 2024-11-15T11:01:25,388 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:01:25,406 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T11:01:25,416 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:01:25,417 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-15T11:01:25,422 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:01:25,422 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-15T11:01:25,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 651 msec 2024-11-15T11:01:25,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T11:01:25,916 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T11:01:25,917 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917 2024-11-15T11:01:25,917 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:26,012 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:26,012 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-15T11:01:26,021 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:01:26,047 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-15T11:01:26,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741855_1031 (size=162) 2024-11-15T11:01:26,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741855_1031 (size=162) 2024-11-15T11:01:26,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741855_1031 (size=162) 2024-11-15T11:01:26,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741856_1032 (size=627) 2024-11-15T11:01:26,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741856_1032 (size=627) 2024-11-15T11:01:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741856_1032 (size=627) 2024-11-15T11:01:26,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741857_1033 (size=154) 2024-11-15T11:01:26,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741857_1033 (size=154) 2024-11-15T11:01:26,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741857_1033 (size=154) 2024-11-15T11:01:26,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:26,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:26,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-18171940890368331125.jar 2024-11-15T11:01:27,429 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,430 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-18053696561216160410.jar 2024-11-15T11:01:27,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:27,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:01:27,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:01:27,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
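From here the test drives ExportSnapshot: it copies the snapshot manifest into the export destination and then prepares a MapReduce job; the "For class X, using jar Y" lines appear to come from TableMapReduceUtil shipping HBase and its dependency jars with that job. A programmatic sketch of the equivalent export is below; the flag spellings (-snapshot, -copy-to, -target) follow the ExportSnapshot usage as documented and should be treated as assumptions to verify, and the destination URI is copied from the log.

```java
// Hedged sketch: run the ExportSnapshot tool programmatically, exporting the snapshot
// under a different target name, as this test ("testExportWithTargetName") exercises.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",   // name of the snapshot at the destination
        "-copy-to", "hdfs://localhost:37487/user/jenkins/test-data/"
            + "0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917"
    });
    System.exit(rc);
  }
}
```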
2024-11-15T11:01:27,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:01:27,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:01:27,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:01:27,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:01:27,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:01:27,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:01:27,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:01:27,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:01:27,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:27,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:27,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:27,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:27,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:27,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:27,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:27,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741858_1034 (size=131440) 2024-11-15T11:01:27,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741858_1034 (size=131440) 2024-11-15T11:01:27,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741858_1034 (size=131440) 2024-11-15T11:01:27,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T11:01:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T11:01:27,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T11:01:28,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T11:01:28,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T11:01:28,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T11:01:28,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741861_1037 (size=903737) 2024-11-15T11:01:28,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741861_1037 (size=903737) 2024-11-15T11:01:28,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741861_1037 (size=903737) 2024-11-15T11:01:28,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741862_1038 (size=8360083) 2024-11-15T11:01:28,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added 
to blk_1073741862_1038 (size=8360083) 2024-11-15T11:01:28,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741862_1038 (size=8360083) 2024-11-15T11:01:28,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741863_1039 (size=1877034) 2024-11-15T11:01:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741863_1039 (size=1877034) 2024-11-15T11:01:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741863_1039 (size=1877034) 2024-11-15T11:01:28,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741864_1040 (size=6424745) 2024-11-15T11:01:28,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741864_1040 (size=6424745) 2024-11-15T11:01:28,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741864_1040 (size=6424745) 2024-11-15T11:01:28,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741865_1041 (size=77835) 2024-11-15T11:01:28,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741865_1041 (size=77835) 2024-11-15T11:01:28,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741865_1041 (size=77835) 2024-11-15T11:01:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741866_1042 (size=30949) 2024-11-15T11:01:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741866_1042 (size=30949) 2024-11-15T11:01:28,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741866_1042 (size=30949) 2024-11-15T11:01:28,735 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:01:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741867_1043 (size=1597327) 2024-11-15T11:01:28,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741867_1043 (size=1597327) 2024-11-15T11:01:28,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741867_1043 (size=1597327) 2024-11-15T11:01:28,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741868_1044 (size=4695811) 2024-11-15T11:01:28,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741868_1044 (size=4695811) 2024-11-15T11:01:28,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39395 is added to blk_1073741868_1044 (size=4695811) 2024-11-15T11:01:28,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741869_1045 (size=232957) 2024-11-15T11:01:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741869_1045 (size=232957) 2024-11-15T11:01:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741869_1045 (size=232957) 2024-11-15T11:01:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741870_1046 (size=127628) 2024-11-15T11:01:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741870_1046 (size=127628) 2024-11-15T11:01:29,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741870_1046 (size=127628) 2024-11-15T11:01:29,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741871_1047 (size=20406) 2024-11-15T11:01:29,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741871_1047 (size=20406) 2024-11-15T11:01:29,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741871_1047 (size=20406) 2024-11-15T11:01:29,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741872_1048 (size=5175431) 2024-11-15T11:01:29,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741872_1048 (size=5175431) 2024-11-15T11:01:29,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741872_1048 (size=5175431) 2024-11-15T11:01:29,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741873_1049 (size=217634) 2024-11-15T11:01:29,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741873_1049 (size=217634) 2024-11-15T11:01:29,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741873_1049 (size=217634) 2024-11-15T11:01:29,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T11:01:29,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T11:01:29,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T11:01:29,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741875_1051 (size=322274) 2024-11-15T11:01:29,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39395 is added to blk_1073741875_1051 (size=322274) 2024-11-15T11:01:29,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741875_1051 (size=322274) 2024-11-15T11:01:29,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741876_1052 (size=503880) 2024-11-15T11:01:29,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741876_1052 (size=503880) 2024-11-15T11:01:29,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741876_1052 (size=503880) 2024-11-15T11:01:29,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741877_1053 (size=440656) 2024-11-15T11:01:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741877_1053 (size=440656) 2024-11-15T11:01:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741877_1053 (size=440656) 2024-11-15T11:01:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741878_1054 (size=29229) 2024-11-15T11:01:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741878_1054 (size=29229) 2024-11-15T11:01:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741878_1054 (size=29229) 2024-11-15T11:01:29,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741879_1055 (size=24096) 2024-11-15T11:01:29,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741879_1055 (size=24096) 2024-11-15T11:01:29,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741879_1055 (size=24096) 2024-11-15T11:01:29,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741880_1056 (size=111872) 2024-11-15T11:01:29,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741880_1056 (size=111872) 2024-11-15T11:01:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741880_1056 (size=111872) 2024-11-15T11:01:29,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741881_1057 (size=45609) 2024-11-15T11:01:29,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741881_1057 (size=45609) 2024-11-15T11:01:29,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741881_1057 (size=45609) 2024-11-15T11:01:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42399 is added to blk_1073741882_1058 (size=136454) 2024-11-15T11:01:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741882_1058 (size=136454) 2024-11-15T11:01:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741882_1058 (size=136454) 2024-11-15T11:01:29,660 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T11:01:29,670 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-15T11:01:29,686 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:01:29,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741883_1059 (size=342) 2024-11-15T11:01:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741883_1059 (size=342) 2024-11-15T11:01:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741883_1059 (size=342) 2024-11-15T11:01:29,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741884_1060 (size=15) 2024-11-15T11:01:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741884_1060 (size=15) 2024-11-15T11:01:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741884_1060 (size=15) 2024-11-15T11:01:30,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741885_1061 (size=303739) 2024-11-15T11:01:30,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741885_1061 (size=303739) 2024-11-15T11:01:30,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741885_1061 (size=303739) 2024-11-15T11:01:30,695 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:01:30,699 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:01:30,866 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0001_000001 (auth:SIMPLE) from 127.0.0.1:40534 2024-11-15T11:01:39,329 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0001_000001 (auth:SIMPLE) from 127.0.0.1:56214 2024-11-15T11:01:39,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741886_1062 (size=349389) 2024-11-15T11:01:39,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741886_1062 (size=349389) 2024-11-15T11:01:39,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741886_1062 (size=349389) 2024-11-15T11:01:41,618 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0001_000001 (auth:SIMPLE) from 127.0.0.1:35296 2024-11-15T11:01:41,949 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:01:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741887_1063 (size=8258) 2024-11-15T11:01:46,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741887_1063 (size=8258) 2024-11-15T11:01:46,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741887_1063 (size=8258) 2024-11-15T11:01:46,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741888_1064 (size=5356) 2024-11-15T11:01:46,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741888_1064 (size=5356) 2024-11-15T11:01:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741888_1064 (size=5356) 2024-11-15T11:01:47,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741889_1065 (size=17419) 2024-11-15T11:01:47,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741889_1065 (size=17419) 2024-11-15T11:01:47,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741889_1065 (size=17419) 2024-11-15T11:01:47,259 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000002/launch_container.sh] 2024-11-15T11:01:47,260 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000002/container_tokens] 2024-11-15T11:01:47,260 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000002/sysfs] 2024-11-15T11:01:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741890_1066 (size=464) 2024-11-15T11:01:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741890_1066 (size=464) 2024-11-15T11:01:47,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741890_1066 (size=464) 2024-11-15T11:01:47,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741891_1067 (size=17419) 2024-11-15T11:01:47,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741891_1067 (size=17419) 2024-11-15T11:01:47,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741891_1067 (size=17419) 2024-11-15T11:01:47,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741892_1068 (size=349389) 2024-11-15T11:01:47,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741892_1068 (size=349389) 2024-11-15T11:01:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741892_1068 (size=349389) 2024-11-15T11:01:49,124 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:01:49,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
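The entries above record the MapReduce-backed snapshot export finishing and the test verifying what landed at the destination. A minimal sketch of how such an export can be driven, assuming the ExportSnapshot options documented in the HBase reference guide (-snapshot, -copy-to, -target); the destination URI is a placeholder, not the export-test path used in this run. The recursive listing at the end mirrors the "List files in DFS" verification entries that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder destination; the run above used its own export-test directory.
    String copyTo = "hdfs://localhost:37487/user/jenkins/export-test";

    // Run the export as a MapReduce job; -target renames the snapshot at the
    // destination, which is what testExportWithTargetName exercises.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", copyTo,
        "-target", "testExportWithTargetName" });
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot exited with " + rc);
    }

    // Recursively list what landed under the exported snapshot, mirroring the
    // verification step logged below.
    Path exported = new Path(copyTo, ".hbase-snapshot/testExportWithTargetName");
    FileSystem fs = exported.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}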
2024-11-15T11:01:49,156 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-15T11:01:49,157 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:01:49,158 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:01:49,158 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-15T11:01:49,159 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-15T11:01:49,159 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-15T11:01:49,159 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/testExportWithTargetName 2024-11-15T11:01:49,160 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-15T11:01:49,160 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668485917/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-15T11:01:49,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-15T11:01:49,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,209 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668509208"}]},"ts":"1731668509208"} 2024-11-15T11:01:49,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T11:01:49,213 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-15T11:01:49,213 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-15T11:01:49,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-15T11:01:49,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, UNASSIGN}] 2024-11-15T11:01:49,226 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, UNASSIGN 2024-11-15T11:01:49,227 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, UNASSIGN 2024-11-15T11:01:49,231 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=e45f0f459cfa1b2a327d167c20ebf16b, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:49,233 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=4ddca89ec1344d996622b72ea235ecfc, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:49,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, UNASSIGN because future has completed 2024-11-15T11:01:49,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, UNASSIGN because future has completed 2024-11-15T11:01:49,241 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:01:49,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:01:49,246 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:01:49,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:01:49,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T11:01:49,403 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] 
handler.UnassignRegionHandler(122): Close e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:49,403 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:01:49,404 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing e45f0f459cfa1b2a327d167c20ebf16b, disabling compactions & flushes 2024-11-15T11:01:49,404 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 4ddca89ec1344d996622b72ea235ecfc, disabling compactions & flushes 2024-11-15T11:01:49,404 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. after waiting 0 ms 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. after waiting 0 ms 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:49,404 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 
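The UNASSIGN and close-region steps above are the server side of the disable request logged earlier ("Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName"). A minimal client-side sketch using the standard Admin API; the connection setup is assumed, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's DisableTableProcedure and its
      // region-close subprocedures (logged above) have completed.
      admin.disableTable(TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}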
2024-11-15T11:01:49,437 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:01:49,441 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:01:49,441 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b. 2024-11-15T11:01:49,442 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for e45f0f459cfa1b2a327d167c20ebf16b: Waiting for close lock at 1731668509404Running coprocessor pre-close hooks at 1731668509404Disabling compacts and flushes for region at 1731668509404Disabling writes for close at 1731668509404Writing region close event to WAL at 1731668509407 (+3 ms)Running coprocessor post-close hooks at 1731668509438 (+31 ms)Closed at 1731668509441 (+3 ms) 2024-11-15T11:01:49,444 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:01:49,445 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:01:49,445 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc. 
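The repeated "Checking to see if procedure is done pid=18" entries show the client polling the master until the disable procedure finishes. The same pattern written out explicitly against the async Admin API, as a sketch only; the poll interval is arbitrary.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class DisableAndWait {
  // Issue the disable asynchronously, then poll the table state, mirroring
  // the "Checking to see if procedure is done" entries above.
  static void disableAndWait(Admin admin, TableName table) throws Exception {
    if (admin.isTableDisabled(table)) {
      return; // nothing to do
    }
    admin.disableTableAsync(table); // returns a Future<Void>
    while (!admin.isTableDisabled(table)) {
      Thread.sleep(100); // arbitrary poll interval
    }
  }
}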
2024-11-15T11:01:49,445 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 4ddca89ec1344d996622b72ea235ecfc: Waiting for close lock at 1731668509404Running coprocessor pre-close hooks at 1731668509404Disabling compacts and flushes for region at 1731668509404Disabling writes for close at 1731668509404Writing region close event to WAL at 1731668509421 (+17 ms)Running coprocessor post-close hooks at 1731668509445 (+24 ms)Closed at 1731668509445 2024-11-15T11:01:49,446 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:49,447 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=e45f0f459cfa1b2a327d167c20ebf16b, regionState=CLOSED 2024-11-15T11:01:49,448 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:49,450 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=4ddca89ec1344d996622b72ea235ecfc, regionState=CLOSED 2024-11-15T11:01:49,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:01:49,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:01:49,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-15T11:01:49,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure e45f0f459cfa1b2a327d167c20ebf16b, server=4cdab6d79b20,39207,1731668474152 in 211 msec 2024-11-15T11:01:49,458 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=20 2024-11-15T11:01:49,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e45f0f459cfa1b2a327d167c20ebf16b, UNASSIGN in 232 msec 2024-11-15T11:01:49,458 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 4ddca89ec1344d996622b72ea235ecfc, server=4cdab6d79b20,36113,1731668474097 in 208 msec 2024-11-15T11:01:49,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-15T11:01:49,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4ddca89ec1344d996622b72ea235ecfc, UNASSIGN in 234 msec 2024-11-15T11:01:49,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-15T11:01:49,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, 
ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 248 msec 2024-11-15T11:01:49,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668509469"}]},"ts":"1731668509469"} 2024-11-15T11:01:49,473 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-15T11:01:49,473 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-15T11:01:49,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 280 msec 2024-11-15T11:01:49,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T11:01:49,525 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T11:01:49,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-15T11:01:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,537 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-15T11:01:49,539 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-15T11:01:49,548 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:49,548 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:49,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T11:01:49,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T11:01:49,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T11:01:49,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T11:01:49,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:49,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:49,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:49,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-15T11:01:49,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-15T11:01:49,553 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:01:49,553 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:01:49,554 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/recovered.edits] 2024-11-15T11:01:49,554 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/recovered.edits] 2024-11-15T11:01:49,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:49,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-15T11:01:49,554 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because 
writable data is empty 2024-11-15T11:01:49,555 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-15T11:01:49,555 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:01:49,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-15T11:01:49,563 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/cf/e859d7fca6c54750ae88f22155ec6ed1 2024-11-15T11:01:49,563 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/cf/a9d6a4e1ed33456484d74c2d430d1d4e 2024-11-15T11:01:49,570 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc/recovered.edits/9.seqid 2024-11-15T11:01:49,571 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b/recovered.edits/9.seqid 2024-11-15T11:01:49,571 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/e45f0f459cfa1b2a327d167c20ebf16b 2024-11-15T11:01:49,572 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithTargetName/4ddca89ec1344d996622b72ea235ecfc 2024-11-15T11:01:49,572 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-15T11:01:49,579 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] util.ReflectedFunctionCache(97): Populated cache for 
org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-15T11:01:49,592 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-15T11:01:49,598 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-15T11:01:49,600 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,600 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-15T11:01:49,601 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668509600"}]},"ts":"9223372036854775807"} 2024-11-15T11:01:49,601 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668509600"}]},"ts":"9223372036854775807"} 2024-11-15T11:01:49,604 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:01:49,604 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4ddca89ec1344d996622b72ea235ecfc, NAME => 'testtb-testExportWithTargetName,,1731668482747.4ddca89ec1344d996622b72ea235ecfc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e45f0f459cfa1b2a327d167c20ebf16b, NAME => 'testtb-testExportWithTargetName,1,1731668482747.e45f0f459cfa1b2a327d167c20ebf16b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:01:49,605 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
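The DeleteTableProcedure above (HFile archiving, hbase:meta cleanup) and the snapshot-delete requests that follow are the test's teardown. A minimal sketch of the equivalent client calls, assuming the standard Admin API; the table is already disabled at this point, as the preceding entries show.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Drop the (already disabled) table; the master archives its HFiles and
      // removes its rows from hbase:meta, as the entries above show.
      admin.deleteTable(TableName.valueOf("testtb-testExportWithTargetName"));
      // Remove the snapshots taken earlier in the test run.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}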
2024-11-15T11:01:49,605 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668509605"}]},"ts":"9223372036854775807"} 2024-11-15T11:01:49,609 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-15T11:01:49,612 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T11:01:49,614 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 82 msec 2024-11-15T11:01:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-15T11:01:49,666 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-15T11:01:49,667 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T11:01:49,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-15T11:01:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-15T11:01:49,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-15T11:01:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-15T11:01:49,743 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=770 (was 720) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at 
/127.0.0.1:50216 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1254 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:40695 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 17584) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40695 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_672355554_1 at /127.0.0.1:46946 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:46791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:46952 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=792 (was 778) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=629 (was 397) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2018 (was 4641) 2024-11-15T11:01:49,744 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500 2024-11-15T11:01:49,772 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=770, OpenFileDescriptor=792, MaxFileDescriptor=1048576, SystemLoadAverage=629, ProcessCount=19, AvailableMemoryMB=2011 2024-11-15T11:01:49,772 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500 2024-11-15T11:01:49,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:01:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:01:49,780 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:01:49,780 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:49,786 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:01:49,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-15T11:01:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T11:01:49,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is 
added to blk_1073741893_1069 (size=404) 2024-11-15T11:01:49,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741893_1069 (size=404) 2024-11-15T11:01:49,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741893_1069 (size=404) 2024-11-15T11:01:49,841 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f3cc2061a10174031b3261eff0c9da71, NAME => 'testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:49,844 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 647e0e6ebc95217ced15fb7a896dd27e, NAME => 'testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:49,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741894_1070 (size=65) 2024-11-15T11:01:49,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741894_1070 (size=65) 2024-11-15T11:01:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741894_1070 (size=65) 2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing f3cc2061a10174031b3261eff0c9da71, disabling compactions & flushes 2024-11-15T11:01:49,893 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 
2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. after waiting 0 ms 2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:49,893 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:49,893 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for f3cc2061a10174031b3261eff0c9da71: Waiting for close lock at 1731668509893Disabling compacts and flushes for region at 1731668509893Disabling writes for close at 1731668509893Writing region close event to WAL at 1731668509893Closed at 1731668509893 2024-11-15T11:01:49,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T11:01:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741895_1071 (size=65) 2024-11-15T11:01:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741895_1071 (size=65) 2024-11-15T11:01:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741895_1071 (size=65) 2024-11-15T11:01:49,900 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:49,900 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 647e0e6ebc95217ced15fb7a896dd27e, disabling compactions & flushes 2024-11-15T11:01:49,900 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:49,900 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:49,900 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. after waiting 0 ms 2024-11-15T11:01:49,901 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
2024-11-15T11:01:49,901 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:49,901 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 647e0e6ebc95217ced15fb7a896dd27e: Waiting for close lock at 1731668509900Disabling compacts and flushes for region at 1731668509900Disabling writes for close at 1731668509901 (+1 ms)Writing region close event to WAL at 1731668509901Closed at 1731668509901 2024-11-15T11:01:49,903 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:01:49,903 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668509903"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668509903"}]},"ts":"1731668509903"} 2024-11-15T11:01:49,904 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668509903"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668509903"}]},"ts":"1731668509903"} 2024-11-15T11:01:49,911 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:01:49,917 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:01:49,917 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668509917"}]},"ts":"1731668509917"} 2024-11-15T11:01:49,920 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-15T11:01:49,921 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:01:49,923 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:01:49,923 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:01:49,923 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:01:49,923 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-11-15T11:01:49,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, ASSIGN}] 2024-11-15T11:01:49,925 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, ASSIGN 2024-11-15T11:01:49,926 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, ASSIGN 2024-11-15T11:01:49,927 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:01:49,928 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:01:50,077 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:01:50,078 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=647e0e6ebc95217ced15fb7a896dd27e, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:50,078 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=f3cc2061a10174031b3261eff0c9da71, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:50,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, ASSIGN because future has completed 2024-11-15T11:01:50,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:01:50,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, ASSIGN because future has completed 2024-11-15T11:01:50,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:01:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T11:01:50,236 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:01:50,241 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43123, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:01:50,241 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:50,242 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => 647e0e6ebc95217ced15fb7a896dd27e, NAME => 'testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:01:50,242 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. service=AccessControlService 2024-11-15T11:01:50,242 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:01:50,243 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,243 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:50,243 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,243 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,245 INFO [StoreOpener-647e0e6ebc95217ced15fb7a896dd27e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,245 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => f3cc2061a10174031b3261eff0c9da71, NAME => 'testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:01:50,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. service=AccessControlService 2024-11-15T11:01:50,246 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:01:50,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:50,246 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,247 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,248 INFO [StoreOpener-f3cc2061a10174031b3261eff0c9da71-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,249 INFO [StoreOpener-647e0e6ebc95217ced15fb7a896dd27e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 647e0e6ebc95217ced15fb7a896dd27e columnFamilyName cf 2024-11-15T11:01:50,249 DEBUG [StoreOpener-647e0e6ebc95217ced15fb7a896dd27e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:50,251 INFO [StoreOpener-f3cc2061a10174031b3261eff0c9da71-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f3cc2061a10174031b3261eff0c9da71 columnFamilyName cf 2024-11-15T11:01:50,251 DEBUG [StoreOpener-f3cc2061a10174031b3261eff0c9da71-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:50,255 INFO [StoreOpener-f3cc2061a10174031b3261eff0c9da71-1 {}] 
regionserver.HStore(327): Store=f3cc2061a10174031b3261eff0c9da71/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:50,255 INFO [StoreOpener-647e0e6ebc95217ced15fb7a896dd27e-1 {}] regionserver.HStore(327): Store=647e0e6ebc95217ced15fb7a896dd27e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:50,255 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,255 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,256 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,256 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,257 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,257 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,258 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,258 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,258 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,258 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,260 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,260 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,263 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 
{event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:50,263 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:50,264 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened 647e0e6ebc95217ced15fb7a896dd27e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69948510, jitterRate=0.04231402277946472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:50,264 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,265 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened f3cc2061a10174031b3261eff0c9da71; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63688821, jitterRate=-0.05096261203289032}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:50,265 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for 647e0e6ebc95217ced15fb7a896dd27e: Running coprocessor pre-open hook at 1731668510243Writing region info on filesystem at 1731668510243Initializing all the Stores at 1731668510244 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668510244Cleaning up temporary data from old regions at 1731668510258 (+14 ms)Running coprocessor post-open hooks at 1731668510264 (+6 ms)Region opened successfully at 1731668510265 (+1 ms) 2024-11-15T11:01:50,265 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,266 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for f3cc2061a10174031b3261eff0c9da71: Running coprocessor pre-open hook at 1731668510247Writing region info on filesystem at 1731668510247Initializing all the Stores at 1731668510247Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 
1731668510247Cleaning up temporary data from old regions at 1731668510258 (+11 ms)Running coprocessor post-open hooks at 1731668510265 (+7 ms)Region opened successfully at 1731668510265 2024-11-15T11:01:50,267 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71., pid=29, masterSystemTime=1731668510235 2024-11-15T11:01:50,267 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., pid=28, masterSystemTime=1731668510233 2024-11-15T11:01:50,269 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,269 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,270 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=f3cc2061a10174031b3261eff0c9da71, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:01:50,271 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:50,271 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
2024-11-15T11:01:50,274 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=647e0e6ebc95217ced15fb7a896dd27e, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:50,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:01:50,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:01:50,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=26 2024-11-15T11:01:50,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964 in 195 msec 2024-11-15T11:01:50,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-15T11:01:50,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097 in 196 msec 2024-11-15T11:01:50,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, ASSIGN in 357 msec 2024-11-15T11:01:50,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-15T11:01:50,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, ASSIGN in 358 msec 2024-11-15T11:01:50,286 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:01:50,286 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668510286"}]},"ts":"1731668510286"} 2024-11-15T11:01:50,288 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-15T11:01:50,290 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:01:50,290 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-15T11:01:50,294 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T11:01:50,296 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:50,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:50,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:50,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:50,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:50,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:50,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:50,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:50,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 523 msec 2024-11-15T11:01:50,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T11:01:50,417 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T11:01:50,417 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-15T11:01:50,417 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:50,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-15T11:01:50,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:50,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
2024-11-15T11:01:50,423 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:50,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T11:01:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668510428 (current time:1731668510428). 2024-11-15T11:01:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:01:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T11:01:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:01:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c80485, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:50,431 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:50,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:50,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:50,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a00e87c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:50,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:50,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,433 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:43152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:50,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f97d154, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:50,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:50,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:50,437 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:50,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:50,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:50,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,439 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:01:50,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77356a27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:50,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:50,441 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:50,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:50,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:50,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32919997, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:50,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:50,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,443 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:50,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15e6ff25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:50,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:50,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:50,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33512, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:01:50,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:50,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:50,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:50,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 
{}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T11:01:50,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:01:50,453 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:50,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T11:01:50,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-15T11:01:50,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T11:01:50,457 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:01:50,459 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:01:50,463 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:01:50,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741896_1072 (size=161) 2024-11-15T11:01:50,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741896_1072 (size=161) 2024-11-15T11:01:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741896_1072 (size=161) 2024-11-15T11:01:50,475 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:01:50,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e}] 2024-11-15T11:01:50,476 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,477 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T11:01:50,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-15T11:01:50,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:50,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for 647e0e6ebc95217ced15fb7a896dd27e: 2024-11-15T11:01:50,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-15T11:01:50,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for f3cc2061a10174031b3261eff0c9da71: 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. for emptySnaptb0-testExportWithResetTtl completed. 
2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:50,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:01:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741898_1074 (size=68) 2024-11-15T11:01:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741897_1073 (size=68) 2024-11-15T11:01:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741898_1074 (size=68) 2024-11-15T11:01:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741897_1073 (size=68) 2024-11-15T11:01:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741897_1073 (size=68) 2024-11-15T11:01:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741898_1074 (size=68) 2024-11-15T11:01:50,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
2024-11-15T11:01:50,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-15T11:01:50,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-15T11:01:50,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-15T11:01:50,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-15T11:01:50,646 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,646 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,646 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 in 173 msec 2024-11-15T11:01:50,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=30 2024-11-15T11:01:50,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e in 173 msec 2024-11-15T11:01:50,652 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:01:50,655 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:01:50,656 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:01:50,656 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-15T11:01:50,657 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-15T11:01:50,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741899_1075 (size=543) 2024-11-15T11:01:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741899_1075 (size=543) 2024-11-15T11:01:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741899_1075 (size=543) 2024-11-15T11:01:50,672 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:01:50,677 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:01:50,678 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-15T11:01:50,679 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:01:50,680 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-15T11:01:50,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 227 msec 2024-11-15T11:01:50,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T11:01:50,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T11:01:50,782 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='05910802a1797b3e637c7e8472cb021fa', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:01:50,783 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location 
of 'testtb-testExportWithResetTtl', row='1f3efb898b1167feb7eb16b84131c8e8e', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,784 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2b593e18bb7d0ee05335feedf09901b07', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,786 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='3ecca76685201fdc78260ec71de0a1e53', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,787 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='454bdd207d0fbbc2234569e17a0b63ea2', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,788 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:50,788 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5517cf4fb6630aa4ef5beec6960bb4bc8', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,789 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='69140f82abc14b79f23f8bebf34509f15', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:50,790 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='9b80094f0a7021c0081ee09a3b6ef9ba', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:50,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:01:50,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-15T11:01:50,795 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:50,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-15T11:01:50,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:50,798 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:50,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:50,806 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:50,812 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:50,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T11:01:50,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668510815 (current time:1731668510815). 
2024-11-15T11:01:50,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:01:50,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T11:01:50,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:01:50,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@251cbe55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:50,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:50,817 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:50,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:50,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:50,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c598d13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:50,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:50,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,819 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:50,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@183325f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:50,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:50,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:50,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:50,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,823 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:01:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c4c95ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:50,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:50,824 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:50,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:50,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:50,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@796a1dbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:50,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:50,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,826 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:50,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5de75618, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:50,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:50,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:50,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:50,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:01:50,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:50,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:50,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:50,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:50,832 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:50,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T11:01:50,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:01:50,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T11:01:50,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-15T11:01:50,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T11:01:50,835 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:01:50,836 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:01:50,839 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:01:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741900_1076 (size=156) 2024-11-15T11:01:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741900_1076 (size=156) 2024-11-15T11:01:50,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741900_1076 (size=156) 2024-11-15T11:01:50,851 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:01:50,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e}] 2024-11-15T11:01:50,853 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:50,853 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:50,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T11:01:51,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-15T11:01:51,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:01:51,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-15T11:01:51,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:01:51,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing f3cc2061a10174031b3261eff0c9da71 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T11:01:51,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing 647e0e6ebc95217ced15fb7a896dd27e 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T11:01:51,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/.tmp/cf/3ea81371bfa34499b5bc25b69a6f2e23 is 71, key is 0090946722d5b417d6cb26f9cf2c8d87/cf:q/1731668510790/Put/seqid=0 2024-11-15T11:01:51,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/.tmp/cf/75e6a2676096486d95415aea279a4dc5 is 71, key is 1ae34d1e075031cf9ab48bebb0825e74/cf:q/1731668510792/Put/seqid=0 2024-11-15T11:01:51,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741901_1077 (size=5356) 2024-11-15T11:01:51,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741901_1077 (size=5356) 2024-11-15T11:01:51,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741901_1077 (size=5356) 2024-11-15T11:01:51,055 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741902_1078 (size=8256) 2024-11-15T11:01:51,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741902_1078 (size=8256) 2024-11-15T11:01:51,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741902_1078 (size=8256) 2024-11-15T11:01:51,057 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/.tmp/cf/75e6a2676096486d95415aea279a4dc5 2024-11-15T11:01:51,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/.tmp/cf/75e6a2676096486d95415aea279a4dc5 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5 2024-11-15T11:01:51,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T11:01:51,076 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 647e0e6ebc95217ced15fb7a896dd27e in 71ms, sequenceid=6, compaction requested=false 2024-11-15T11:01:51,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for 647e0e6ebc95217ced15fb7a896dd27e: 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. for snaptb0-testExportWithResetTtl completed. 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5] hfiles 2024-11-15T11:01:51,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5 for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741903_1079 (size=107) 2024-11-15T11:01:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741903_1079 (size=107) 2024-11-15T11:01:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741903_1079 (size=107) 2024-11-15T11:01:51,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
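The flush mechanics for region 647e0e6ebc95217ced15fb7a896dd27e are visible above: the memstore is written to a .tmp HFile, committed into the cf/ directory, and then referenced in the snapshot manifest. The FLUSH-type snapshot triggers that flush itself, but an explicit client-side flush drives the same DefaultStoreFlusher / HRegionFileSystem path on the region server; a minimal sketch, with the table name taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks each region of the table to write its memstore out as an HFile,
      // the same flush/commit sequence the log entries above show.
      admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}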
2024-11-15T11:01:51,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-15T11:01:51,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-15T11:01:51,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:51,087 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:01:51,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e in 237 msec 2024-11-15T11:01:51,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T11:01:51,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/.tmp/cf/3ea81371bfa34499b5bc25b69a6f2e23 2024-11-15T11:01:51,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T11:01:51,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/.tmp/cf/3ea81371bfa34499b5bc25b69a6f2e23 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23 2024-11-15T11:01:51,491 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T11:01:51,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for f3cc2061a10174031b3261eff0c9da71 in 488ms, sequenceid=6, compaction requested=false 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for f3cc2061a10174031b3261eff0c9da71: 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 
for snaptb0-testExportWithResetTtl completed. 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23] hfiles 2024-11-15T11:01:51,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23 for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741904_1080 (size=107) 2024-11-15T11:01:51,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741904_1080 (size=107) 2024-11-15T11:01:51,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741904_1080 (size=107) 2024-11-15T11:01:51,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 
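At this point both regions have stored their region-info and HFile references for snaptb0-testExportWithResetTtl; the master-side consolidation, verification and completion states of the SnapshotProcedure follow below. Seen from the client, this whole sequence is a single flush-snapshot request; a minimal sketch under the assumption of a standard Admin connection, with the snapshot and table names taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a flush snapshot: the master runs the
      // SnapshotProcedure (SNAPSHOT_PREPARE -> ... -> SNAPSHOT_COMPLETE_SNAPSHOT)
      // and each region server flushes its regions and adds HFile references
      // to the snapshot manifest, as the surrounding log entries record.
      admin.snapshot("snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}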
2024-11-15T11:01:51,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-15T11:01:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-15T11:01:51,503 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:51,504 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:01:51,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-15T11:01:51,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3cc2061a10174031b3261eff0c9da71 in 654 msec 2024-11-15T11:01:51,507 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:01:51,508 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:01:51,509 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:01:51,509 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,510 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741905_1081 (size=621) 2024-11-15T11:01:51,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741905_1081 (size=621) 2024-11-15T11:01:51,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741905_1081 (size=621) 2024-11-15T11:01:51,524 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:01:51,530 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): 
pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:01:51,531 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-15T11:01:51,532 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:01:51,532 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-15T11:01:51,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 699 msec 2024-11-15T11:01:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T11:01:51,976 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T11:01:51,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:01:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-15T11:01:51,981 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:01:51,981 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:51,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-15T11:01:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=36 2024-11-15T11:01:51,984 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:01:52,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741906_1082 (size=397) 2024-11-15T11:01:52,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741906_1082 (size=397) 2024-11-15T11:01:52,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741906_1082 (size=397) 2024-11-15T11:01:52,003 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d71e80023855bc7b9793237992f73001, NAME => 'testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:52,004 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 82349b982f9caad8b001c802c129d3a7, NAME => 'testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741908_1084 (size=58) 2024-11-15T11:01:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741907_1083 (size=58) 2024-11-15T11:01:52,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741908_1084 (size=58) 2024-11-15T11:01:52,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741907_1083 (size=58) 2024-11-15T11:01:52,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741908_1084 (size=58) 2024-11-15T11:01:52,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741907_1083 (size=58) 2024-11-15T11:01:52,025 DEBUG 
[RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:52,025 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 82349b982f9caad8b001c802c129d3a7, disabling compactions & flushes 2024-11-15T11:01:52,025 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,025 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,025 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. after waiting 0 ms 2024-11-15T11:01:52,025 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,026 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 82349b982f9caad8b001c802c129d3a7: Waiting for close lock at 1731668512025Disabling compacts and flushes for region at 1731668512025Disabling writes for close at 1731668512025Writing region close event to WAL at 1731668512026 (+1 ms)Closed at 1731668512026 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing d71e80023855bc7b9793237992f73001, disabling compactions & flushes 2024-11-15T11:01:52,026 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. after waiting 0 ms 2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,026 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 
2024-11-15T11:01:52,026 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for d71e80023855bc7b9793237992f73001: Waiting for close lock at 1731668512026Disabling compacts and flushes for region at 1731668512026Disabling writes for close at 1731668512026Writing region close event to WAL at 1731668512026Closed at 1731668512026 2024-11-15T11:01:52,028 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:01:52,028 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731668512028"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668512028"}]},"ts":"1731668512028"} 2024-11-15T11:01:52,029 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731668512028"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668512028"}]},"ts":"1731668512028"} 2024-11-15T11:01:52,032 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:01:52,034 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:01:52,034 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668512034"}]},"ts":"1731668512034"} 2024-11-15T11:01:52,037 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-15T11:01:52,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:01:52,039 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:01:52,039 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:01:52,039 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:01:52,040 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:01:52,040 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:01:52,040 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:01:52,040 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:01:52,040 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:01:52,040 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:01:52,040 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:01:52,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=d71e80023855bc7b9793237992f73001, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, ASSIGN}] 2024-11-15T11:01:52,042 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, ASSIGN 2024-11-15T11:01:52,043 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, ASSIGN 2024-11-15T11:01:52,043 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:01:52,045 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:01:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T11:01:52,194 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
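The CreateTableProcedure above writes the filesystem layout for two regions split at row key '1', adds them to hbase:meta, and hands them to the balancer for assignment. A hedged sketch of the client call that starts this procedure follows; the column-family attributes are left at their defaults here, which may not reproduce every attribute printed in the descriptor above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key yields the two regions seen above: ['', '1') and ['1', '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}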
2024-11-15T11:01:52,194 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=d71e80023855bc7b9793237992f73001, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:52,194 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=82349b982f9caad8b001c802c129d3a7, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:52,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, ASSIGN because future has completed 2024-11-15T11:01:52,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:01:52,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, ASSIGN because future has completed 2024-11-15T11:01:52,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:01:52,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T11:01:52,354 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,354 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,354 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => d71e80023855bc7b9793237992f73001, NAME => 'testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:01:52,354 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => 82349b982f9caad8b001c802c129d3a7, NAME => 'testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. service=AccessControlService 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 
service=AccessControlService 2024-11-15T11:01:52,355 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:52,355 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,355 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,356 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,356 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,357 INFO [StoreOpener-d71e80023855bc7b9793237992f73001-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,357 INFO [StoreOpener-82349b982f9caad8b001c802c129d3a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,359 INFO [StoreOpener-d71e80023855bc7b9793237992f73001-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d71e80023855bc7b9793237992f73001 columnFamilyName cf 2024-11-15T11:01:52,359 DEBUG [StoreOpener-d71e80023855bc7b9793237992f73001-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:52,359 INFO [StoreOpener-82349b982f9caad8b001c802c129d3a7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82349b982f9caad8b001c802c129d3a7 columnFamilyName cf 2024-11-15T11:01:52,360 DEBUG [StoreOpener-82349b982f9caad8b001c802c129d3a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:01:52,360 INFO [StoreOpener-d71e80023855bc7b9793237992f73001-1 {}] regionserver.HStore(327): Store=d71e80023855bc7b9793237992f73001/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:52,360 INFO [StoreOpener-82349b982f9caad8b001c802c129d3a7-1 {}] regionserver.HStore(327): Store=82349b982f9caad8b001c802c129d3a7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:01:52,360 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,360 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,361 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,361 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, 
pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,362 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,365 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,365 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,368 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:52,368 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:01:52,369 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened 82349b982f9caad8b001c802c129d3a7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59858883, jitterRate=-0.10803313553333282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:52,369 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened d71e80023855bc7b9793237992f73001; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75407345, jitterRate=0.12365700304508209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:01:52,369 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82349b982f9caad8b001c802c129d3a7 
2024-11-15T11:01:52,369 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,370 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for 82349b982f9caad8b001c802c129d3a7: Running coprocessor pre-open hook at 1731668512356Writing region info on filesystem at 1731668512356Initializing all the Stores at 1731668512357 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668512357Cleaning up temporary data from old regions at 1731668512362 (+5 ms)Running coprocessor post-open hooks at 1731668512369 (+7 ms)Region opened successfully at 1731668512369 2024-11-15T11:01:52,370 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for d71e80023855bc7b9793237992f73001: Running coprocessor pre-open hook at 1731668512356Writing region info on filesystem at 1731668512356Initializing all the Stores at 1731668512357 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668512357Cleaning up temporary data from old regions at 1731668512362 (+5 ms)Running coprocessor post-open hooks at 1731668512369 (+7 ms)Region opened successfully at 1731668512370 (+1 ms) 2024-11-15T11:01:52,371 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7., pid=40, masterSystemTime=1731668512351 2024-11-15T11:01:52,371 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001., pid=39, masterSystemTime=1731668512350 2024-11-15T11:01:52,373 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,373 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=d71e80023855bc7b9793237992f73001, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:01:52,374 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 
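With both regions open, the remaining entries finish the assignment procedures and then persist the table owner's permissions: a PermissionStorage write of "jenkins: RWXCA" followed by ZKPermissionWatcher cache updates on every server. In this log that write is done by the AccessController coprocessor as part of CREATE_TABLE_POST_OPERATION, but an explicit grant issued through the client-side access-control helper would produce similar hbase:acl and /hbase/acl ZooKeeper traffic. A sketch under the assumption that AccessControlClient.grant takes (connection, table, user, family, qualifier, actions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // AccessControlClient.grant declares "throws Throwable", so it is propagated here.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Writes a "jenkins: RWXCA"-style entry for the table into hbase:acl and
      // publishes it under /hbase/acl, which the ZKPermissionWatcher lines below
      // show being picked up by each region server.
      AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}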
2024-11-15T11:01:52,374 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,374 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=82349b982f9caad8b001c802c129d3a7, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:01:52,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:01:52,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:01:52,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-15T11:01:52,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152 in 180 msec 2024-11-15T11:01:52,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-15T11:01:52,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097 in 180 msec 2024-11-15T11:01:52,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, ASSIGN in 340 msec 2024-11-15T11:01:52,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=36 2024-11-15T11:01:52,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, ASSIGN in 341 msec 2024-11-15T11:01:52,384 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:01:52,384 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668512384"}]},"ts":"1731668512384"} 2024-11-15T11:01:52,386 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-15T11:01:52,387 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:01:52,387 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-15T11:01:52,390 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T11:01:52,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:52,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:52,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:52,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:01:52,461 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,466 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,467 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:01:52,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 483 msec 2024-11-15T11:01:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T11:01:52,616 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-15T11:01:52,616 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-15T11:01:52,617 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:52,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-15T11:01:52,621 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:52,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-15T11:01:52,622 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:52,630 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='02abcb0945e735c2806f9831ae4c2ac60', locateType=CURRENT is [region=testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:52,631 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='11c6a6bbb47f04e67cfef0b10cfc2edb1', locateType=CURRENT is [region=testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:52,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2c1b5294ca35afffadeab3634e94c665d', locateType=CURRENT is [region=testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:52,634 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3a2544c3beca8bfb27cb8d3f4e1782424', locateType=CURRENT is [region=testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:01:52,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:01:52,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-15T11:01:52,646 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:52,650 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-15T11:01:52,650 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,651 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:01:52,653 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:52,660 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:52,667 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T11:01:52,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-15T11:01:52,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668512671 (current time:1731668512671). 
2024-11-15T11:01:52,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T11:01:52,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:01:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5618d98d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:52,675 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eaa367e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:52,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,678 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:52,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4014cba0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:52,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:52,680 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:52,682 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:01:52,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:52,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:52,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,686 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:01:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc0cffd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:01:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:01:52,689 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:01:52,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:01:52,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:01:52,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12bd5a4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:01:52,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:01:52,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,691 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:01:52,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cc1e671, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:01:52,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:01:52,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:01:52,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:01:52,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33538, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:01:52,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:01:52,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:01:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:01:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:01:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] 
access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T11:01:52,709 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:01:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:01:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-15T11:01:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-15T11:01:52,713 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:01:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T11:01:52,714 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:01:52,717 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:01:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741909_1085 (size=143) 2024-11-15T11:01:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741909_1085 (size=143) 2024-11-15T11:01:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741909_1085 (size=143) 2024-11-15T11:01:52,738 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:01:52,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d71e80023855bc7b9793237992f73001}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82349b982f9caad8b001c802c129d3a7}] 2024-11-15T11:01:52,739 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,739 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T11:01:52,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-15T11:01:52,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-15T11:01:52,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,893 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing 82349b982f9caad8b001c802c129d3a7 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-15T11:01:52,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:01:52,893 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing d71e80023855bc7b9793237992f73001 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-15T11:01:52,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/.tmp/cf/f426adaf061b472caaa6744574b1f8a3 is 69, key is 02abcb0945e735c2806f9831ae4c2ac60/cf:q/1731668512639/Put/seqid=0 2024-11-15T11:01:52,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/.tmp/cf/0aa628432657464b889961693f58b6f8 is 71, key is 19b605b23cd428aaca360c7d08e996be/cf:q/1731668512642/Put/seqid=0 2024-11-15T11:01:52,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741910_1086 (size=5149) 2024-11-15T11:01:52,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741911_1087 (size=8462) 2024-11-15T11:01:52,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741911_1087 (size=8462) 2024-11-15T11:01:52,925 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/.tmp/cf/f426adaf061b472caaa6744574b1f8a3 2024-11-15T11:01:52,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741911_1087 (size=8462) 2024-11-15T11:01:52,926 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/.tmp/cf/0aa628432657464b889961693f58b6f8 2024-11-15T11:01:52,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741910_1086 (size=5149) 2024-11-15T11:01:52,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741910_1086 (size=5149) 2024-11-15T11:01:52,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/.tmp/cf/f426adaf061b472caaa6744574b1f8a3 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3 2024-11-15T11:01:52,934 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/.tmp/cf/0aa628432657464b889961693f58b6f8 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8 2024-11-15T11:01:52,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8, entries=49, sequenceid=5, filesize=8.3 K 2024-11-15T11:01:52,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3, entries=1, sequenceid=5, filesize=5.0 K 2024-11-15T11:01:52,941 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for d71e80023855bc7b9793237992f73001 in 48ms, sequenceid=5, compaction requested=false 2024-11-15T11:01:52,941 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-15T11:01:52,941 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 82349b982f9caad8b001c802c129d3a7 in 48ms, sequenceid=5, compaction requested=false 2024-11-15T11:01:52,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for d71e80023855bc7b9793237992f73001: 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for 82349b982f9caad8b001c802c129d3a7: 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. for snaptb-testExportWithResetTtl completed. 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. for snaptb-testExportWithResetTtl completed. 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3] hfiles 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3 for snapshot=snaptb-testExportWithResetTtl 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8] hfiles 2024-11-15T11:01:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8 for snapshot=snaptb-testExportWithResetTtl 2024-11-15T11:01:52,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741912_1088 (size=100) 2024-11-15T11:01:52,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741913_1089 (size=100) 2024-11-15T11:01:52,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741913_1089 (size=100) 2024-11-15T11:01:52,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 
2024-11-15T11:01:52,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-15T11:01:52,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741912_1088 (size=100) 2024-11-15T11:01:52,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741913_1089 (size=100) 2024-11-15T11:01:52,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741912_1088 (size=100) 2024-11-15T11:01:52,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:01:52,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-15T11:01:52,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-15T11:01:52,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:01:52,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-15T11:01:52,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,956 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d71e80023855bc7b9793237992f73001 2024-11-15T11:01:52,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 82349b982f9caad8b001c802c129d3a7 in 217 msec 2024-11-15T11:01:52,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-15T11:01:52,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d71e80023855bc7b9793237992f73001 in 218 msec 2024-11-15T11:01:52,958 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:01:52,959 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl 
table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:01:52,960 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:01:52,960 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-15T11:01:52,961 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T11:01:52,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741914_1090 (size=600) 2024-11-15T11:01:52,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741914_1090 (size=600) 2024-11-15T11:01:52,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741914_1090 (size=600) 2024-11-15T11:01:52,974 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:01:52,981 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:01:52,982 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T11:01:52,983 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:01:52,983 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-15T11:01:52,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 274 msec 2024-11-15T11:01:53,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T11:01:53,025 INFO [RPCClient-NioEventLoopGroup-6-4 
{}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-15T11:01:53,037 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037 2024-11-15T11:01:53,037 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:53,083 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:01:53,083 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T11:01:53,086 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:01:53,096 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T11:01:53,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741915_1091 (size=600) 2024-11-15T11:01:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741915_1091 (size=600) 2024-11-15T11:01:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741915_1091 (size=600) 2024-11-15T11:01:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741916_1092 (size=143) 2024-11-15T11:01:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741916_1092 (size=143) 2024-11-15T11:01:53,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741916_1092 (size=143) 2024-11-15T11:01:53,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741917_1093 (size=141) 2024-11-15T11:01:53,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741917_1093 (size=141) 
2024-11-15T11:01:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741917_1093 (size=141) 2024-11-15T11:01:53,582 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:53,582 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:53,583 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:53,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-15T11:01:53,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:53,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-15T11:01:53,588 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-15T11:01:53,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-15T11:01:53,934 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0001_000001 (auth:SIMPLE) from 127.0.0.1:41212 2024-11-15T11:01:53,939 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000001/launch_container.sh] 2024-11-15T11:01:53,939 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000001/container_tokens] 2024-11-15T11:01:53,939 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_0/usercache/jenkins/appcache/application_1731668480794_0001/container_1731668480794_0001_01_000001/sysfs] 2024-11-15T11:01:54,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-3901736940327788341.jar 2024-11-15T11:01:54,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-3088274378983672920.jar 2024-11-15T11:01:54,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:01:54,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:01:54,739 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:01:54,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:01:54,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:01:54,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:01:54,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:01:54,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:01:54,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:01:54,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:01:54,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:01:54,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:01:54,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:54,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:54,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:54,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:54,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:01:54,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:54,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:01:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741918_1094 (size=131440) 2024-11-15T11:01:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741918_1094 (size=131440) 2024-11-15T11:01:54,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741918_1094 (size=131440) 2024-11-15T11:01:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T11:01:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T11:01:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T11:01:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741920_1096 (size=6424745) 2024-11-15T11:01:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741920_1096 (size=6424745) 2024-11-15T11:01:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741920_1096 (size=6424745) 2024-11-15T11:01:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T11:01:54,918 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T11:01:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T11:01:54,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741922_1098 (size=903737) 2024-11-15T11:01:54,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741922_1098 (size=903737) 2024-11-15T11:01:54,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741922_1098 (size=903737) 2024-11-15T11:01:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T11:01:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T11:01:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T11:01:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T11:01:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T11:01:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T11:01:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741925_1101 (size=77835) 2024-11-15T11:01:54,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741925_1101 (size=77835) 2024-11-15T11:01:54,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741925_1101 (size=77835) 2024-11-15T11:01:55,001 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:01:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741926_1102 (size=30949) 2024-11-15T11:01:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741926_1102 (size=30949) 2024-11-15T11:01:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741926_1102 (size=30949) 2024-11-15T11:01:55,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T11:01:55,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T11:01:55,034 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T11:01:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T11:01:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T11:01:55,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T11:01:55,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741929_1105 (size=232957) 2024-11-15T11:01:55,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741929_1105 (size=232957) 2024-11-15T11:01:55,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741929_1105 (size=232957) 2024-11-15T11:01:55,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741930_1106 (size=127628) 2024-11-15T11:01:55,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741930_1106 (size=127628) 2024-11-15T11:01:55,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741930_1106 (size=127628) 2024-11-15T11:01:55,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741931_1107 (size=20406) 2024-11-15T11:01:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741931_1107 (size=20406) 2024-11-15T11:01:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741931_1107 (size=20406) 2024-11-15T11:01:55,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T11:01:55,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T11:01:55,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T11:01:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741933_1109 (size=217634) 2024-11-15T11:01:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741933_1109 (size=217634) 2024-11-15T11:01:55,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741933_1109 (size=217634) 2024-11-15T11:01:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T11:01:55,205 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T11:01:55,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T11:01:55,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741935_1111 (size=322274) 2024-11-15T11:01:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741935_1111 (size=322274) 2024-11-15T11:01:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741935_1111 (size=322274) 2024-11-15T11:01:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741936_1112 (size=503880) 2024-11-15T11:01:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741936_1112 (size=503880) 2024-11-15T11:01:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741936_1112 (size=503880) 2024-11-15T11:01:55,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741937_1113 (size=440656) 2024-11-15T11:01:55,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741937_1113 (size=440656) 2024-11-15T11:01:55,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741937_1113 (size=440656) 2024-11-15T11:01:55,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741938_1114 (size=29229) 2024-11-15T11:01:55,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741938_1114 (size=29229) 2024-11-15T11:01:55,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741938_1114 (size=29229) 2024-11-15T11:01:55,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741939_1115 (size=24096) 2024-11-15T11:01:55,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741939_1115 (size=24096) 2024-11-15T11:01:55,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741939_1115 (size=24096) 2024-11-15T11:01:55,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741940_1116 (size=111872) 2024-11-15T11:01:55,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741940_1116 (size=111872) 2024-11-15T11:01:55,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741940_1116 (size=111872) 2024-11-15T11:01:55,370 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741941_1117 (size=45609) 2024-11-15T11:01:55,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741941_1117 (size=45609) 2024-11-15T11:01:55,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741941_1117 (size=45609) 2024-11-15T11:01:55,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741942_1118 (size=136454) 2024-11-15T11:01:55,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741942_1118 (size=136454) 2024-11-15T11:01:55,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741942_1118 (size=136454) 2024-11-15T11:01:55,394 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T11:01:55,397 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-15T11:01:55,400 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:01:55,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741943_1119 (size=324) 2024-11-15T11:01:55,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741943_1119 (size=324) 2024-11-15T11:01:55,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741943_1119 (size=324) 2024-11-15T11:01:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741944_1120 (size=15) 2024-11-15T11:01:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741944_1120 (size=15) 2024-11-15T11:01:55,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741944_1120 (size=15) 2024-11-15T11:01:55,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741945_1121 (size=303726) 2024-11-15T11:01:55,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741945_1121 (size=303726) 2024-11-15T11:01:55,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741945_1121 (size=303726) 2024-11-15T11:01:55,895 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:01:55,895 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:01:55,926 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0002_000001 (auth:SIMPLE) from 127.0.0.1:41218 2024-11-15T11:01:58,234 INFO [master/4cdab6d79b20:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T11:01:58,234 INFO [master/4cdab6d79b20:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T11:02:02,061 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0002_000001 (auth:SIMPLE) from 127.0.0.1:49626 2024-11-15T11:02:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741946_1122 (size=349376) 2024-11-15T11:02:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741946_1122 (size=349376) 2024-11-15T11:02:02,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741946_1122 (size=349376) 2024-11-15T11:02:04,393 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0002_000001 (auth:SIMPLE) from 127.0.0.1:51794 2024-11-15T11:02:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741947_1123 (size=8462) 2024-11-15T11:02:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741947_1123 (size=8462) 2024-11-15T11:02:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741947_1123 (size=8462) 2024-11-15T11:02:08,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741948_1124 (size=5149) 2024-11-15T11:02:08,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741948_1124 (size=5149) 2024-11-15T11:02:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741948_1124 (size=5149) 2024-11-15T11:02:08,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741949_1125 (size=17398) 2024-11-15T11:02:08,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741949_1125 (size=17398) 2024-11-15T11:02:08,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741949_1125 (size=17398) 2024-11-15T11:02:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741950_1126 (size=461) 2024-11-15T11:02:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741950_1126 (size=461) 2024-11-15T11:02:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to 
blk_1073741950_1126 (size=461) 2024-11-15T11:02:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741951_1127 (size=17398) 2024-11-15T11:02:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741951_1127 (size=17398) 2024-11-15T11:02:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741951_1127 (size=17398) 2024-11-15T11:02:08,956 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000002/launch_container.sh] 2024-11-15T11:02:08,957 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000002/container_tokens] 2024-11-15T11:02:08,957 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000002/sysfs] 2024-11-15T11:02:08,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741952_1128 (size=349376) 2024-11-15T11:02:08,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741952_1128 (size=349376) 2024-11-15T11:02:08,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741952_1128 (size=349376) 2024-11-15T11:02:08,984 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0002_000001 (auth:SIMPLE) from 127.0.0.1:51806 2024-11-15T11:02:10,136 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:02:10,138 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
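For reference, the ExportSnapshot activity recorded above (the "For class ..., using jar ..." entries from TableMapReduceUtil resolving dependency jars for the job, the split planning, and the finalize/verify steps) corresponds to a run of the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool. The following is a minimal sketch of how such an export is typically driven, assuming the documented -snapshot and -copy-to options; the class name ExportSnapshotSketch and the destination URI are illustrative placeholders, not code or paths taken from TestExportSnapshot itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Standard HBase client configuration (picks up hbase-site.xml from the classpath).
    Configuration conf = HBaseConfiguration.create();
    // Export the snapshot named in the log to another HDFS location; internally this
    // submits a MapReduce job, which is why the log shows jar resolution and block writes.
    // The -copy-to URI below is a placeholder, not the path used by the test run.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://namenode:8020/export-test"
    });
    System.exit(rc);
  }
}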
2024-11-15T11:02:10,155 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-15T11:02:10,155 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:02:10,156 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:02:10,156 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T11:02:10,157 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-15T11:02:10,157 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-15T11:02:10,157 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T11:02:10,158 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-15T11:02:10,158 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668513037/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-15T11:02:10,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-15T11:02:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T11:02:10,176 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668530175"}]},"ts":"1731668530175"} 2024-11-15T11:02:10,179 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-15T11:02:10,179 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-15T11:02:10,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-15T11:02:10,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, UNASSIGN}] 2024-11-15T11:02:10,184 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, UNASSIGN 2024-11-15T11:02:10,185 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, UNASSIGN 2024-11-15T11:02:10,186 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d71e80023855bc7b9793237992f73001, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:10,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=82349b982f9caad8b001c802c129d3a7, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:02:10,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, UNASSIGN because future has completed 2024-11-15T11:02:10,189 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:10,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:02:10,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, UNASSIGN because future has completed 2024-11-15T11:02:10,191 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:10,191 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:02:10,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T11:02:10,343 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close d71e80023855bc7b9793237992f73001 2024-11-15T11:02:10,343 DEBUG 
[RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:10,343 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing d71e80023855bc7b9793237992f73001, disabling compactions & flushes 2024-11-15T11:02:10,343 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:02:10,343 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:02:10,343 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. after waiting 0 ms 2024-11-15T11:02:10,343 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:02:10,345 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:02:10,345 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:10,345 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 82349b982f9caad8b001c802c129d3a7, disabling compactions & flushes 2024-11-15T11:02:10,345 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:02:10,345 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 2024-11-15T11:02:10,345 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. after waiting 0 ms 2024-11-15T11:02:10,346 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 
2024-11-15T11:02:10,353 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:02:10,354 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:10,354 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001. 2024-11-15T11:02:10,354 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for d71e80023855bc7b9793237992f73001: Waiting for close lock at 1731668530343Running coprocessor pre-close hooks at 1731668530343Disabling compacts and flushes for region at 1731668530343Disabling writes for close at 1731668530343Writing region close event to WAL at 1731668530345 (+2 ms)Running coprocessor post-close hooks at 1731668530354 (+9 ms)Closed at 1731668530354 2024-11-15T11:02:10,357 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed d71e80023855bc7b9793237992f73001 2024-11-15T11:02:10,358 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d71e80023855bc7b9793237992f73001, regionState=CLOSED 2024-11-15T11:02:10,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:02:10,365 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:02:10,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-11-15T11:02:10,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure d71e80023855bc7b9793237992f73001, server=4cdab6d79b20,39207,1731668474152 in 173 msec 2024-11-15T11:02:10,366 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:10,366 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7. 
2024-11-15T11:02:10,366 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 82349b982f9caad8b001c802c129d3a7: Waiting for close lock at 1731668530345Running coprocessor pre-close hooks at 1731668530345Disabling compacts and flushes for region at 1731668530345Disabling writes for close at 1731668530346 (+1 ms)Writing region close event to WAL at 1731668530347 (+1 ms)Running coprocessor post-close hooks at 1731668530366 (+19 ms)Closed at 1731668530366 2024-11-15T11:02:10,367 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d71e80023855bc7b9793237992f73001, UNASSIGN in 183 msec 2024-11-15T11:02:10,370 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 82349b982f9caad8b001c802c129d3a7 2024-11-15T11:02:10,372 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=82349b982f9caad8b001c802c129d3a7, regionState=CLOSED 2024-11-15T11:02:10,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:02:10,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-11-15T11:02:10,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure 82349b982f9caad8b001c802c129d3a7, server=4cdab6d79b20,36113,1731668474097 in 185 msec 2024-11-15T11:02:10,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-11-15T11:02:10,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=82349b982f9caad8b001c802c129d3a7, UNASSIGN in 196 msec 2024-11-15T11:02:10,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-15T11:02:10,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 202 msec 2024-11-15T11:02:10,386 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668530386"}]},"ts":"1731668530386"} 2024-11-15T11:02:10,388 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-15T11:02:10,388 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-15T11:02:10,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 219 msec 2024-11-15T11:02:10,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T11:02:10,496 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-15T11:02:10,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-15T11:02:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-15T11:02:10,504 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-15T11:02:10,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,516 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T11:02:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,522 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:10,522 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-15T11:02:10,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:10,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:10,525 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/recovered.edits] 2024-11-15T11:02:10,531 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/cf/f426adaf061b472caaa6744574b1f8a3 2024-11-15T11:02:10,537 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7 2024-11-15T11:02:10,537 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/recovered.edits/8.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001/recovered.edits/8.seqid 2024-11-15T11:02:10,539 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/d71e80023855bc7b9793237992f73001 2024-11-15T11:02:10,541 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/recovered.edits] 2024-11-15T11:02:10,547 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/cf/0aa628432657464b889961693f58b6f8 2024-11-15T11:02:10,551 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/recovered.edits/8.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7/recovered.edits/8.seqid 2024-11-15T11:02:10,552 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportWithResetTtl/82349b982f9caad8b001c802c129d3a7 2024-11-15T11:02:10,552 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 
2024-11-15T11:02:10,557 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,561 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-15T11:02:10,565 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-15T11:02:10,568 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,568 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-15T11:02:10,568 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668530568"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,568 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668530568"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,572 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:02:10,572 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d71e80023855bc7b9793237992f73001, NAME => 'testExportWithResetTtl,,1731668511977.d71e80023855bc7b9793237992f73001.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 82349b982f9caad8b001c802c129d3a7, NAME => 'testExportWithResetTtl,1,1731668511977.82349b982f9caad8b001c802c129d3a7.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:02:10,572 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
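The DisableTableProcedure and DeleteTableProcedure entries above are what the master runs when a client disables and then drops a table. The following is a minimal client-side sketch using the standard Admin API, assuming a cluster reachable through the default configuration; the class name DropTableSketch and the try-with-resources wiring are illustrative rather than the exact code used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testExportWithResetTtl");
      // disableTable returns once the master's DisableTableProcedure completes,
      // which is the "Checking to see if procedure is done" polling seen in the log.
      admin.disableTable(table);
      // deleteTable triggers the DeleteTableProcedure: region files are archived,
      // region rows are removed from hbase:meta, and the table descriptor is dropped.
      admin.deleteTable(table);
    }
  }
}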
2024-11-15T11:02:10,573 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668530572"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,576 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-15T11:02:10,577 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T11:02:10,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 80 msec 2024-11-15T11:02:10,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-15T11:02:10,626 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-15T11:02:10,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-15T11:02:10,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-15T11:02:10,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T11:02:10,636 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668530636"}]},"ts":"1731668530636"} 2024-11-15T11:02:10,645 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-15T11:02:10,645 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-15T11:02:10,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-15T11:02:10,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, UNASSIGN}] 2024-11-15T11:02:10,654 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, UNASSIGN 2024-11-15T11:02:10,654 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, UNASSIGN 2024-11-15T11:02:10,655 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=647e0e6ebc95217ced15fb7a896dd27e, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:02:10,656 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=f3cc2061a10174031b3261eff0c9da71, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:02:10,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, UNASSIGN because future has completed 2024-11-15T11:02:10,659 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:10,659 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:02:10,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, UNASSIGN because future has completed 2024-11-15T11:02:10,661 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:10,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:02:10,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T11:02:10,812 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:02:10,812 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:10,812 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing 647e0e6ebc95217ced15fb7a896dd27e, disabling compactions & flushes 2024-11-15T11:02:10,812 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:02:10,812 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
2024-11-15T11:02:10,812 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. after waiting 0 ms 2024-11-15T11:02:10,812 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 2024-11-15T11:02:10,815 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:02:10,815 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:10,815 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing f3cc2061a10174031b3261eff0c9da71, disabling compactions & flushes 2024-11-15T11:02:10,815 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:02:10,815 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:02:10,815 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. after waiting 0 ms 2024-11-15T11:02:10,816 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:02:10,818 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:02:10,819 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:10,819 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e. 
2024-11-15T11:02:10,819 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for 647e0e6ebc95217ced15fb7a896dd27e: Waiting for close lock at 1731668530812Running coprocessor pre-close hooks at 1731668530812Disabling compacts and flushes for region at 1731668530812Disabling writes for close at 1731668530812Writing region close event to WAL at 1731668530813 (+1 ms)Running coprocessor post-close hooks at 1731668530819 (+6 ms)Closed at 1731668530819 2024-11-15T11:02:10,822 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed 647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:02:10,822 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:02:10,823 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:10,823 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=647e0e6ebc95217ced15fb7a896dd27e, regionState=CLOSED 2024-11-15T11:02:10,823 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71. 2024-11-15T11:02:10,823 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for f3cc2061a10174031b3261eff0c9da71: Waiting for close lock at 1731668530815Running coprocessor pre-close hooks at 1731668530815Disabling compacts and flushes for region at 1731668530815Disabling writes for close at 1731668530816 (+1 ms)Writing region close event to WAL at 1731668530816Running coprocessor post-close hooks at 1731668530822 (+6 ms)Closed at 1731668530823 (+1 ms) 2024-11-15T11:02:10,825 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:02:10,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:02:10,827 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=f3cc2061a10174031b3261eff0c9da71, regionState=CLOSED 2024-11-15T11:02:10,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:02:10,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=54 2024-11-15T11:02:10,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 
647e0e6ebc95217ced15fb7a896dd27e, server=4cdab6d79b20,36113,1731668474097 in 168 msec 2024-11-15T11:02:10,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=647e0e6ebc95217ced15fb7a896dd27e, UNASSIGN in 178 msec 2024-11-15T11:02:10,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=53 2024-11-15T11:02:10,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure f3cc2061a10174031b3261eff0c9da71, server=4cdab6d79b20,45865,1731668473964 in 170 msec 2024-11-15T11:02:10,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-15T11:02:10,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f3cc2061a10174031b3261eff0c9da71, UNASSIGN in 182 msec 2024-11-15T11:02:10,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-15T11:02:10,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 188 msec 2024-11-15T11:02:10,843 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668530842"}]},"ts":"1731668530842"} 2024-11-15T11:02:10,845 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-15T11:02:10,845 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-15T11:02:10,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 217 msec 2024-11-15T11:02:10,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T11:02:10,956 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T11:02:10,956 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-15T11:02:10,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,959 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-15T11:02:10,960 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,963 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-15T11:02:10,965 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:02:10,965 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:02:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T11:02:10,969 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/recovered.edits] 2024-11-15T11:02:10,969 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf, FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/recovered.edits] 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:10,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-15T11:02:10,974 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/cf/75e6a2676096486d95415aea279a4dc5 2024-11-15T11:02:10,975 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/cf/3ea81371bfa34499b5bc25b69a6f2e23 2024-11-15T11:02:10,978 DEBUG [HFileArchiver-6 
{}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e/recovered.edits/9.seqid 2024-11-15T11:02:10,979 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/647e0e6ebc95217ced15fb7a896dd27e 2024-11-15T11:02:10,980 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71/recovered.edits/9.seqid 2024-11-15T11:02:10,981 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithResetTtl/f3cc2061a10174031b3261eff0c9da71 2024-11-15T11:02:10,981 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-15T11:02:10,984 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,987 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-15T11:02:10,990 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-15T11:02:10,993 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:10,993 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-11-15T11:02:10,993 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668530993"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,994 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668530993"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,997 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:02:10,997 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f3cc2061a10174031b3261eff0c9da71, NAME => 'testtb-testExportWithResetTtl,,1731668509774.f3cc2061a10174031b3261eff0c9da71.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 647e0e6ebc95217ced15fb7a896dd27e, NAME => 'testtb-testExportWithResetTtl,1,1731668509774.647e0e6ebc95217ced15fb7a896dd27e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:02:10,997 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-15T11:02:10,997 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668530997"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:10,999 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-15T11:02:11,001 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T11:02:11,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 45 msec 2024-11-15T11:02:11,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-15T11:02:11,077 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-15T11:02:11,077 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T11:02:11,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-15T11:02:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-15T11:02:11,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-15T11:02:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-15T11:02:11,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-15T11:02:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-15T11:02:11,147 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=795 (was 770) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:41668 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1449161560_1 at /127.0.0.1:44184 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1449161560_1 at /127.0.0.1:52408 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1941 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 20640) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:32951 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:35498 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) 
connection to localhost/127.0.0.1:39889 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:46648 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 792) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=616 (was 629), ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1262 (was 2011) 2024-11-15T11:02:11,148 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-15T11:02:11,169 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=795, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=616, ProcessCount=22, AvailableMemoryMB=1260 2024-11-15T11:02:11,169 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-15T11:02:11,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:02:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:11,174 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:02:11,175 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:11,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-15T11:02:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T11:02:11,176 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:02:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741953_1129 (size=407) 2024-11-15T11:02:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741953_1129 (size=407) 2024-11-15T11:02:11,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42399 is added to blk_1073741953_1129 (size=407) 2024-11-15T11:02:11,201 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a4ee56379a6de75f1ea4f4680844f2bc, NAME => 'testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:11,202 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9afba3da2cf2b077a9aa1ea57175b6dd, NAME => 'testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:11,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741954_1130 (size=68) 2024-11-15T11:02:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741954_1130 (size=68) 2024-11-15T11:02:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741954_1130 (size=68) 2024-11-15T11:02:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741955_1131 (size=68) 2024-11-15T11:02:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741955_1131 (size=68) 2024-11-15T11:02:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741955_1131 (size=68) 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing a4ee56379a6de75f1ea4f4680844f2bc, disabling compactions & flushes 2024-11-15T11:02:11,224 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. after waiting 0 ms 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,224 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,224 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for a4ee56379a6de75f1ea4f4680844f2bc: Waiting for close lock at 1731668531224Disabling compacts and flushes for region at 1731668531224Disabling writes for close at 1731668531224Writing region close event to WAL at 1731668531224Closed at 1731668531224 2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 9afba3da2cf2b077a9aa1ea57175b6dd, disabling compactions & flushes 2024-11-15T11:02:11,225 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. after waiting 0 ms 2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:11,225 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 
2024-11-15T11:02:11,225 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9afba3da2cf2b077a9aa1ea57175b6dd: Waiting for close lock at 1731668531225Disabling compacts and flushes for region at 1731668531225Disabling writes for close at 1731668531225Writing region close event to WAL at 1731668531225Closed at 1731668531225 2024-11-15T11:02:11,226 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:02:11,227 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731668531227"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668531227"}]},"ts":"1731668531227"} 2024-11-15T11:02:11,227 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731668531227"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668531227"}]},"ts":"1731668531227"} 2024-11-15T11:02:11,230 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:02:11,231 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:02:11,231 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668531231"}]},"ts":"1731668531231"} 2024-11-15T11:02:11,233 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-15T11:02:11,234 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:02:11,235 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:02:11,235 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:02:11,235 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:02:11,235 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:02:11,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, ASSIGN}] 2024-11-15T11:02:11,237 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, ASSIGN 2024-11-15T11:02:11,237 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, ASSIGN 2024-11-15T11:02:11,238 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:02:11,238 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:02:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T11:02:11,389 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:02:11,389 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=9afba3da2cf2b077a9aa1ea57175b6dd, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:11,390 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=a4ee56379a6de75f1ea4f4680844f2bc, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:02:11,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, ASSIGN because future has completed 2024-11-15T11:02:11,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:02:11,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, ASSIGN because future has completed 2024-11-15T11:02:11,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:02:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T11:02:11,559 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:11,559 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 9afba3da2cf2b077a9aa1ea57175b6dd, NAME => 'testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:02:11,560 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. service=AccessControlService 2024-11-15T11:02:11,560 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:02:11,560 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,561 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:11,561 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,561 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,561 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,561 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => a4ee56379a6de75f1ea4f4680844f2bc, NAME => 'testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:02:11,561 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. service=AccessControlService 2024-11-15T11:02:11,561 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:02:11,562 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,562 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:11,562 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,562 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,562 INFO [StoreOpener-9afba3da2cf2b077a9aa1ea57175b6dd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,563 INFO [StoreOpener-a4ee56379a6de75f1ea4f4680844f2bc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,564 INFO [StoreOpener-9afba3da2cf2b077a9aa1ea57175b6dd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9afba3da2cf2b077a9aa1ea57175b6dd columnFamilyName cf 2024-11-15T11:02:11,564 DEBUG [StoreOpener-9afba3da2cf2b077a9aa1ea57175b6dd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:11,565 INFO [StoreOpener-9afba3da2cf2b077a9aa1ea57175b6dd-1 {}] regionserver.HStore(327): Store=9afba3da2cf2b077a9aa1ea57175b6dd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:02:11,565 INFO [StoreOpener-a4ee56379a6de75f1ea4f4680844f2bc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4ee56379a6de75f1ea4f4680844f2bc columnFamilyName cf 2024-11-15T11:02:11,565 DEBUG [StoreOpener-a4ee56379a6de75f1ea4f4680844f2bc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:11,565 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,566 INFO [StoreOpener-a4ee56379a6de75f1ea4f4680844f2bc-1 {}] regionserver.HStore(327): Store=a4ee56379a6de75f1ea4f4680844f2bc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:02:11,566 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,567 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,567 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,567 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,567 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,567 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,568 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,568 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,568 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,570 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, 
pid=62}] regionserver.HRegion(1093): writing seq id for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,570 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,572 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:02:11,573 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened a4ee56379a6de75f1ea4f4680844f2bc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67723676, jitterRate=0.009161412715911865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:02:11,573 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,573 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for a4ee56379a6de75f1ea4f4680844f2bc: Running coprocessor pre-open hook at 1731668531562Writing region info on filesystem at 1731668531562Initializing all the Stores at 1731668531563 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668531563Cleaning up temporary data from old regions at 1731668531568 (+5 ms)Running coprocessor post-open hooks at 1731668531573 (+5 ms)Region opened successfully at 1731668531573 2024-11-15T11:02:11,575 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc., pid=62, masterSystemTime=1731668531557 2024-11-15T11:02:11,576 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:02:11,577 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:11,577 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 
2024-11-15T11:02:11,577 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 9afba3da2cf2b077a9aa1ea57175b6dd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66563072, jitterRate=-0.0081329345703125}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:02:11,577 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,577 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 9afba3da2cf2b077a9aa1ea57175b6dd: Running coprocessor pre-open hook at 1731668531561Writing region info on filesystem at 1731668531561Initializing all the Stores at 1731668531562 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668531562Cleaning up temporary data from old regions at 1731668531568 (+6 ms)Running coprocessor post-open hooks at 1731668531577 (+9 ms)Region opened successfully at 1731668531577 2024-11-15T11:02:11,577 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=a4ee56379a6de75f1ea4f4680844f2bc, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:02:11,579 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd., pid=61, masterSystemTime=1731668531555 2024-11-15T11:02:11,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:02:11,581 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:11,581 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 
2024-11-15T11:02:11,581 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=9afba3da2cf2b077a9aa1ea57175b6dd, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:11,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:02:11,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=59 2024-11-15T11:02:11,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097 in 186 msec 2024-11-15T11:02:11,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, ASSIGN in 352 msec 2024-11-15T11:02:11,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=60 2024-11-15T11:02:11,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152 in 194 msec 2024-11-15T11:02:11,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-15T11:02:11,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, ASSIGN in 354 msec 2024-11-15T11:02:11,593 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:02:11,593 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668531593"}]},"ts":"1731668531593"} 2024-11-15T11:02:11,595 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-15T11:02:11,596 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:02:11,596 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-15T11:02:11,600 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:02:11,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:11,602 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:11,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:11,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:11,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:11,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:11,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:11,605 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:11,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 433 msec 2024-11-15T11:02:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T11:02:11,806 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T11:02:11,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-15T11:02:11,806 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:11,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-15T11:02:11,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:11,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
2024-11-15T11:02:11,812 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:02:11,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:02:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668531816 (current time:1731668531816). 2024-11-15T11:02:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:02:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-15T11:02:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:02:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d5e743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:11,819 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:11,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:11,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:11,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@163c19de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:11,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:11,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,825 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:37810, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:11,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f97ba31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:11,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:11,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:11,829 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:11,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:02:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,831 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:02:11,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fe420be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:11,834 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:11,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:11,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:11,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5be139c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:11,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:11,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,836 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:11,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7d67e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:11,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:11,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:11,841 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:02:11,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:11,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:02:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:11,846 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:02:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:02:11,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:02:11,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-15T11:02:11,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T11:02:11,850 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:02:11,851 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:02:11,855 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:02:11,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741956_1132 (size=170) 2024-11-15T11:02:11,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741956_1132 (size=170) 2024-11-15T11:02:11,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741956_1132 (size=170) 2024-11-15T11:02:11,879 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:02:11,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd}] 
2024-11-15T11:02:11,880 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:11,880 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:11,949 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:02:11,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T11:02:12,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-15T11:02:12,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 9afba3da2cf2b077a9aa1ea57175b6dd: 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. for emptySnaptb0-testExportFileSystemState completed. 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:02:12,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-15T11:02:12,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 
2024-11-15T11:02:12,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for a4ee56379a6de75f1ea4f4680844f2bc: 2024-11-15T11:02:12,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. for emptySnaptb0-testExportFileSystemState completed. 2024-11-15T11:02:12,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:12,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:12,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:02:12,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741957_1133 (size=71) 2024-11-15T11:02:12,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741958_1134 (size=71) 2024-11-15T11:02:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741958_1134 (size=71) 2024-11-15T11:02:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741957_1133 (size=71) 2024-11-15T11:02:12,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741957_1133 (size=71) 2024-11-15T11:02:12,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741958_1134 (size=71) 2024-11-15T11:02:12,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:12,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 
2024-11-15T11:02:12,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-15T11:02:12,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-15T11:02:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-15T11:02:12,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:12,059 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:12,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc in 182 msec 2024-11-15T11:02:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-15T11:02:12,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:12,064 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:12,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-15T11:02:12,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd in 186 msec 2024-11-15T11:02:12,068 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:02:12,069 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:02:12,070 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:02:12,070 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:12,071 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:12,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741959_1135 (size=552) 2024-11-15T11:02:12,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741959_1135 (size=552) 2024-11-15T11:02:12,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741959_1135 (size=552) 2024-11-15T11:02:12,102 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:02:12,107 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:02:12,108 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:12,109 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:02:12,110 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-15T11:02:12,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 263 msec 2024-11-15T11:02:12,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T11:02:12,166 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T11:02:12,171 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='038756699578f4116a7e5dc509d947c23', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:02:12,173 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='179005d3c226899c86a6beeefbc4006ef', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:12,177 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='22a4860c3f8c71939a86b6c2f86ce5ddc', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:12,180 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='36da5a98c591671b9048a9a95d9eb8cc4', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:12,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:02:12,184 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:02:12,185 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:02:12,187 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-15T11:02:12,188 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:12,188 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:12,189 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:02:12,194 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:02:12,199 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:02:12,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668532202 (current time:1731668532202). 
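For reference, the snapshot request logged just above ({ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }) is the kind of request a client issues through the HBase Admin API. The sketch below is illustrative only (the class name TakeSnapshotSketch is hypothetical and the client configuration is assumed); it is not the test code that produced this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath that points at the target cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Issues a FLUSH-type snapshot request like the one registered as pid=66 above.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }

Admin.snapshot blocks until the master's SnapshotProcedure completes, which is why the client side keeps logging "Checking to see if procedure is done pid=66" until the procedure finishes.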
2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f96e1a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:12,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:12,203 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b00b09e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:12,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,205 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37828, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:12,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c66c882, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:12,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:12,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:12,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:12,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:02:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,210 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:02:12,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c4995, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:12,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:12,212 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:12,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:12,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:12,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63872b99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:12,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:12,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,214 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:12,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47a3a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:12,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:12,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:12,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:02:12,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:12,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:02:12,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:12,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:12,225 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:02:12,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:02:12,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:02:12,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-15T11:02:12,228 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:02:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T11:02:12,230 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:02:12,233 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:02:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741960_1136 (size=165) 2024-11-15T11:02:12,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741960_1136 (size=165) 2024-11-15T11:02:12,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741960_1136 (size=165) 2024-11-15T11:02:12,248 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:02:12,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd}] 2024-11-15T11:02:12,249 INFO [PEWorker-2 
{}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:12,249 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:12,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T11:02:12,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-15T11:02:12,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-15T11:02:12,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:12,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:12,402 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing a4ee56379a6de75f1ea4f4680844f2bc 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-15T11:02:12,402 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 9afba3da2cf2b077a9aa1ea57175b6dd 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-15T11:02:12,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/.tmp/cf/7804902d477649c581813e0a3a0595e0 is 71, key is 024a602f8e7ec9675d018b4b74f1c7e2/cf:q/1731668532180/Put/seqid=0 2024-11-15T11:02:12,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/.tmp/cf/d65727f7132e45d9ba3516fb38b44a7c is 71, key is 1515982e732ae2fd55a4a7745f1572d1/cf:q/1731668532184/Put/seqid=0 2024-11-15T11:02:12,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741962_1138 (size=8324) 2024-11-15T11:02:12,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741962_1138 (size=8324) 2024-11-15T11:02:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741961_1137 (size=5286) 2024-11-15T11:02:12,441 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741962_1138 (size=8324) 2024-11-15T11:02:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741961_1137 (size=5286) 2024-11-15T11:02:12,441 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/.tmp/cf/d65727f7132e45d9ba3516fb38b44a7c 2024-11-15T11:02:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741961_1137 (size=5286) 2024-11-15T11:02:12,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/.tmp/cf/d65727f7132e45d9ba3516fb38b44a7c as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c 2024-11-15T11:02:12,457 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c, entries=47, sequenceid=6, filesize=8.1 K 2024-11-15T11:02:12,459 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 9afba3da2cf2b077a9aa1ea57175b6dd in 56ms, sequenceid=6, compaction requested=false 2024-11-15T11:02:12,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 9afba3da2cf2b077a9aa1ea57175b6dd: 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. for snaptb0-testExportFileSystemState completed. 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c] hfiles 2024-11-15T11:02:12,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c for snapshot=snaptb0-testExportFileSystemState 2024-11-15T11:02:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741963_1139 (size=110) 2024-11-15T11:02:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741963_1139 (size=110) 2024-11-15T11:02:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741963_1139 (size=110) 2024-11-15T11:02:12,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 
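Because this snapshot is type=FLUSH, each region's memstore is flushed to a new store file before the manifest records a reference to it, which is what the DefaultStoreFlusher and HStore entries above show for region 9afba3da2cf2b077a9aa1ea57175b6dd. For comparison, an explicit client-side flush of the same table would look like the hedged fragment below (admin obtained as in the earlier sketch; not part of this test run):

    // Flushes every region of the table; a FLUSH-type snapshot performs this step implicitly.
    admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));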
2024-11-15T11:02:12,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-15T11:02:12,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-15T11:02:12,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:12,477 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:12,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd in 230 msec 2024-11-15T11:02:12,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T11:02:12,841 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/.tmp/cf/7804902d477649c581813e0a3a0595e0 2024-11-15T11:02:12,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/.tmp/cf/7804902d477649c581813e0a3a0595e0 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0 2024-11-15T11:02:12,853 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0, entries=3, sequenceid=6, filesize=5.2 K 2024-11-15T11:02:12,854 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for a4ee56379a6de75f1ea4f4680844f2bc in 452ms, sequenceid=6, compaction requested=false 2024-11-15T11:02:12,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for a4ee56379a6de75f1ea4f4680844f2bc: 2024-11-15T11:02:12,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. for snaptb0-testExportFileSystemState completed. 
2024-11-15T11:02:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-15T11:02:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0] hfiles 2024-11-15T11:02:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0 for snapshot=snaptb0-testExportFileSystemState 2024-11-15T11:02:12,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T11:02:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741964_1140 (size=110) 2024-11-15T11:02:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741964_1140 (size=110) 2024-11-15T11:02:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741964_1140 (size=110) 2024-11-15T11:02:12,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 
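The two flushes above produced store files with entries=47 (region 9afba3da2cf2b077a9aa1ea57175b6dd) and entries=3 (region a4ee56379a6de75f1ea4f4680844f2bc), so the table holds 47 + 3 = 50 cells at this point; the HFileWriterImpl key dumps show each row carries a single cf:q cell. A client-side scan that counts them might look like the following sketch (hypothetical class name CountRowsSketch; connection configuration assumed; not part of the test run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class CountRowsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          long rows = 0;
          for (Result r : scanner) {
            rows++;
          }
          // Expected to match the 47 + 3 entries flushed above (one cell per row here).
          System.out.println("rows=" + rows);
        }
      }
    }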
2024-11-15T11:02:12,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-15T11:02:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-15T11:02:12,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:12,863 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:12,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=66 2024-11-15T11:02:12,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc in 616 msec 2024-11-15T11:02:12,865 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:02:12,866 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:02:12,867 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:02:12,867 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-15T11:02:12,868 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T11:02:12,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741965_1141 (size=630) 2024-11-15T11:02:12,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741965_1141 (size=630) 2024-11-15T11:02:12,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741965_1141 (size=630) 2024-11-15T11:02:12,882 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:02:12,888 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:02:12,888 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T11:02:12,890 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:02:12,890 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-15T11:02:12,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 664 msec 2024-11-15T11:02:13,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T11:02:13,366 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T11:02:13,366 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366 2024-11-15T11:02:13,366 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:13,402 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:13,403 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T11:02:13,405 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's 
expiration status and integrity. 2024-11-15T11:02:13,409 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T11:02:13,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741967_1143 (size=630) 2024-11-15T11:02:13,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741966_1142 (size=165) 2024-11-15T11:02:13,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741967_1143 (size=630) 2024-11-15T11:02:13,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741967_1143 (size=630) 2024-11-15T11:02:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741966_1142 (size=165) 2024-11-15T11:02:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741966_1142 (size=165) 2024-11-15T11:02:13,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:13,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:13,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:13,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-15T11:02:13,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-15T11:02:13,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-15T11:02:13,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-15T11:02:14,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-11369999779777065566.jar 2024-11-15T11:02:14,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-2078115750434143692.jar 2024-11-15T11:02:14,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:14,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:02:14,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:02:14,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:02:14,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:02:14,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:02:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:02:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:02:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:02:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:02:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:02:14,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:02:14,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:14,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:14,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-11-15T11:02:14,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:14,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:14,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:14,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:14,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741968_1144 (size=131440) 2024-11-15T11:02:14,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741968_1144 (size=131440) 2024-11-15T11:02:14,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741968_1144 (size=131440) 2024-11-15T11:02:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T11:02:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T11:02:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T11:02:14,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T11:02:14,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T11:02:14,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T11:02:14,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741971_1147 (size=903737) 2024-11-15T11:02:14,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741971_1147 (size=903737) 2024-11-15T11:02:14,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741971_1147 (size=903737) 2024-11-15T11:02:14,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741972_1148 (size=8360083) 
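Each addStoredBlock record above is reported three times per block, once per DataNode (127.0.0.1:39395, :38361, :42399), because the mini-cluster writes with replication factor 3. A minimal sketch, assuming a reachable HDFS at the hypothetical URI and path below, of writing a file and inspecting where its blocks landed with the public FileSystem API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode address; the test above talks to its own mini-cluster port.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        try (FileSystem fs = FileSystem.get(conf)) {
            Path p = new Path("/tmp/block-location-sketch.txt");
            try (FSDataOutputStream out = fs.create(p, true)) {
                out.writeUTF("hello blocks");
            }
            fs.setReplication(p, (short) 3); // ask for three replicas, as in the log
            FileStatus st = fs.getFileStatus(p);
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                // Each block should list up to three datanode host:port entries.
                System.out.println(String.join(",", loc.getNames()));
            }
        }
    }
}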
2024-11-15T11:02:14,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741972_1148 (size=8360083) 2024-11-15T11:02:14,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741972_1148 (size=8360083) 2024-11-15T11:02:14,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T11:02:14,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T11:02:14,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T11:02:14,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741974_1150 (size=77835) 2024-11-15T11:02:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741974_1150 (size=77835) 2024-11-15T11:02:14,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741974_1150 (size=77835) 2024-11-15T11:02:14,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741975_1151 (size=30949) 2024-11-15T11:02:14,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741975_1151 (size=30949) 2024-11-15T11:02:14,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741975_1151 (size=30949) 2024-11-15T11:02:14,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741976_1152 (size=1597327) 2024-11-15T11:02:14,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741976_1152 (size=1597327) 2024-11-15T11:02:14,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741976_1152 (size=1597327) 2024-11-15T11:02:14,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T11:02:14,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T11:02:14,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T11:02:14,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741978_1154 (size=232957) 2024-11-15T11:02:14,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741978_1154 (size=232957) 2024-11-15T11:02:14,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741978_1154 
(size=232957) 2024-11-15T11:02:14,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741979_1155 (size=127628) 2024-11-15T11:02:14,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741979_1155 (size=127628) 2024-11-15T11:02:14,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741979_1155 (size=127628) 2024-11-15T11:02:14,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741980_1156 (size=20406) 2024-11-15T11:02:14,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741980_1156 (size=20406) 2024-11-15T11:02:14,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741980_1156 (size=20406) 2024-11-15T11:02:14,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741981_1157 (size=5175431) 2024-11-15T11:02:14,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741981_1157 (size=5175431) 2024-11-15T11:02:14,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741981_1157 (size=5175431) 2024-11-15T11:02:14,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741982_1158 (size=217634) 2024-11-15T11:02:14,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741982_1158 (size=217634) 2024-11-15T11:02:14,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741982_1158 (size=217634) 2024-11-15T11:02:14,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741983_1159 (size=6424745) 2024-11-15T11:02:14,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741983_1159 (size=6424745) 2024-11-15T11:02:14,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741983_1159 (size=6424745) 2024-11-15T11:02:15,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T11:02:15,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T11:02:15,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T11:02:15,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741985_1161 (size=322274) 2024-11-15T11:02:15,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to 
blk_1073741985_1161 (size=322274) 2024-11-15T11:02:15,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741985_1161 (size=322274) 2024-11-15T11:02:15,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741986_1162 (size=503880) 2024-11-15T11:02:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741986_1162 (size=503880) 2024-11-15T11:02:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741986_1162 (size=503880) 2024-11-15T11:02:15,120 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0002_000001 (auth:SIMPLE) from 127.0.0.1:33006 2024-11-15T11:02:15,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741987_1163 (size=29229) 2024-11-15T11:02:15,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741987_1163 (size=29229) 2024-11-15T11:02:15,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741987_1163 (size=29229) 2024-11-15T11:02:15,146 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000001/launch_container.sh] 2024-11-15T11:02:15,146 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000001/container_tokens] 2024-11-15T11:02:15,146 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0002/container_1731668480794_0002_01_000001/sysfs] 2024-11-15T11:02:15,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741988_1164 (size=440656) 2024-11-15T11:02:15,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741988_1164 (size=440656) 2024-11-15T11:02:15,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741988_1164 (size=440656) 2024-11-15T11:02:15,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741989_1165 (size=24096) 2024-11-15T11:02:15,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42399 is added to blk_1073741989_1165 (size=24096) 2024-11-15T11:02:15,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741989_1165 (size=24096) 2024-11-15T11:02:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741990_1166 (size=111872) 2024-11-15T11:02:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741990_1166 (size=111872) 2024-11-15T11:02:15,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741990_1166 (size=111872) 2024-11-15T11:02:15,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741991_1167 (size=45609) 2024-11-15T11:02:15,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741991_1167 (size=45609) 2024-11-15T11:02:15,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741991_1167 (size=45609) 2024-11-15T11:02:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741992_1168 (size=136454) 2024-11-15T11:02:15,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741992_1168 (size=136454) 2024-11-15T11:02:15,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741992_1168 (size=136454) 2024-11-15T11:02:15,192 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
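The JobResourceUploader(481) warning at the end of the record above ("No job jar file set. User classes may not be found.") is the standard reminder to point the job at a jar so user classes can be localized on the cluster. A minimal, hedged sketch of how a MapReduce driver usually avoids that warning (the driver class and jar path here are placeholders, not the test's code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JarSetupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "jar-setup-sketch");
        // Either infer the jar from a class packaged inside it ...
        job.setJarByClass(JarSetupSketch.class);
        // ... or set it explicitly; the path below is purely illustrative.
        // job.setJar("/path/to/my-job.jar");
        System.out.println("job jar = " + job.getJar()); // null if nothing was resolvable
    }
}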
2024-11-15T11:02:15,195 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-15T11:02:15,196 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:02:15,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741993_1169 (size=344) 2024-11-15T11:02:15,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741993_1169 (size=344) 2024-11-15T11:02:15,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741993_1169 (size=344) 2024-11-15T11:02:15,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741994_1170 (size=15) 2024-11-15T11:02:15,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741994_1170 (size=15) 2024-11-15T11:02:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741994_1170 (size=15) 2024-11-15T11:02:15,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741995_1171 (size=303740) 2024-11-15T11:02:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741995_1171 (size=303740) 2024-11-15T11:02:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741995_1171 (size=303740) 2024-11-15T11:02:15,264 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:02:15,264 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:02:15,889 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:02:15,947 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0003_000001 (auth:SIMPLE) from 127.0.0.1:60674 2024-11-15T11:02:18,090 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a4ee56379a6de75f1ea4f4680844f2bc changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:02:18,091 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 28dfeddf0af1b0d2554866c3d938dd83 changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:02:18,091 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9afba3da2cf2b077a9aa1ea57175b6dd changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:02:21,480 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0003_000001 (auth:SIMPLE) from 127.0.0.1:41286 2024-11-15T11:02:21,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741996_1172 (size=349390) 2024-11-15T11:02:21,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741996_1172 (size=349390) 2024-11-15T11:02:21,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741996_1172 (size=349390) 2024-11-15T11:02:23,762 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0003_000001 (auth:SIMPLE) from 127.0.0.1:39516 2024-11-15T11:02:27,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741997_1173 (size=8324) 2024-11-15T11:02:27,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741997_1173 (size=8324) 2024-11-15T11:02:27,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741997_1173 (size=8324) 2024-11-15T11:02:27,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741998_1174 (size=5286) 2024-11-15T11:02:27,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741998_1174 (size=5286) 2024-11-15T11:02:27,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741998_1174 (size=5286) 2024-11-15T11:02:27,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741999_1175 (size=17422) 2024-11-15T11:02:27,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741999_1175 (size=17422) 2024-11-15T11:02:27,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741999_1175 (size=17422) 
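A little earlier, ExportSnapshot(661)/(754) loaded the snapshot's hfile list and grouped it into mapper splits by total size ("export split=0 size=13.3 K"). As a rough sketch of that grouping idea only, not the ExportSnapshot code itself: a greedy partition that always assigns the next-largest file to the currently lightest group keeps splits roughly balanced. The file sizes in main are invented for the example.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SplitBalanceSketch {
    // Greedy, size-balanced grouping of file sizes into nSplits groups.
    static List<List<Long>> balance(List<Long> fileSizes, int nSplits) {
        List<List<Long>> groups = new ArrayList<>();
        long[] totals = new long[nSplits];
        for (int i = 0; i < nSplits; i++) {
            groups.add(new ArrayList<>());
        }
        List<Long> sorted = new ArrayList<>(fileSizes);
        sorted.sort(Comparator.reverseOrder()); // biggest files first
        for (long size : sorted) {
            int lightest = 0;
            for (int i = 1; i < nSplits; i++) {
                if (totals[i] < totals[lightest]) {
                    lightest = i;
                }
            }
            groups.get(lightest).add(size);
            totals[lightest] += size;
        }
        return groups;
    }

    public static void main(String[] args) {
        List<Long> sizes = List.of(8324L, 5286L, 13_300L); // invented hfile sizes
        System.out.println(balance(sizes, 1)); // a single split, as in the log above
    }
}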
2024-11-15T11:02:27,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742000_1176 (size=465) 2024-11-15T11:02:27,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742000_1176 (size=465) 2024-11-15T11:02:27,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742000_1176 (size=465) 2024-11-15T11:02:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742001_1177 (size=17422) 2024-11-15T11:02:27,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742001_1177 (size=17422) 2024-11-15T11:02:27,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742001_1177 (size=17422) 2024-11-15T11:02:27,679 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000002/launch_container.sh] 2024-11-15T11:02:27,679 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000002/container_tokens] 2024-11-15T11:02:27,679 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000002/sysfs] 2024-11-15T11:02:27,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742002_1178 (size=349390) 2024-11-15T11:02:27,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742002_1178 (size=349390) 2024-11-15T11:02:27,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742002_1178 (size=349390) 2024-11-15T11:02:27,699 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0003_000001 (auth:SIMPLE) from 127.0.0.1:39520 2024-11-15T11:02:28,752 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:02:28,754 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
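The finalize/verify records just above, and the TestExportSnapshot(447)/(452) directory listings that follow, walk both the source and the exported snapshot directories (.snapshotinfo and data.manifest). A minimal sketch, assuming a reachable HDFS and a hypothetical snapshot path, of that kind of recursive listing with the FileSystem API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListSnapshotSketch {
    static void listRecursively(FileSystem fs, Path dir) throws Exception {
        for (FileStatus status : fs.listStatus(dir)) {
            if (status.isDirectory()) {
                listRecursively(fs, status.getPath());
            } else {
                System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // hypothetical namenode
        try (FileSystem fs = FileSystem.get(conf)) {
            // Hypothetical export target; the test uses its own .hbase-snapshot path.
            listRecursively(fs, new Path("/export-test/.hbase-snapshot/snaptb0-example"));
        }
    }
}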
2024-11-15T11:02:28,760 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-15T11:02:28,760 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:02:28,761 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:02:28,761 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T11:02:28,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-15T11:02:28,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-15T11:02:28,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T11:02:28,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-15T11:02:28,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668533366/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-15T11:02:28,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-15T11:02:28,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:28,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T11:02:28,772 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668548772"}]},"ts":"1731668548772"} 2024-11-15T11:02:28,775 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-15T11:02:28,775 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-15T11:02:28,775 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-15T11:02:28,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, UNASSIGN}] 2024-11-15T11:02:28,778 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, UNASSIGN 2024-11-15T11:02:28,778 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, UNASSIGN 2024-11-15T11:02:28,779 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=9afba3da2cf2b077a9aa1ea57175b6dd, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:28,779 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=a4ee56379a6de75f1ea4f4680844f2bc, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:02:28,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, UNASSIGN because future has completed 2024-11-15T11:02:28,781 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:28,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:02:28,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, UNASSIGN because future has completed 2024-11-15T11:02:28,782 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:02:28,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:02:28,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T11:02:28,933 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 
{event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing a4ee56379a6de75f1ea4f4680844f2bc, disabling compactions & flushes 2024-11-15T11:02:28,934 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:28,934 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. after waiting 0 ms 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 9afba3da2cf2b077a9aa1ea57175b6dd, disabling compactions & flushes 2024-11-15T11:02:28,934 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. after waiting 0 ms 2024-11-15T11:02:28,934 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 
2024-11-15T11:02:28,940 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:02:28,940 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:02:28,940 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:28,940 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc. 2024-11-15T11:02:28,940 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:02:28,941 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for a4ee56379a6de75f1ea4f4680844f2bc: Waiting for close lock at 1731668548934Running coprocessor pre-close hooks at 1731668548934Disabling compacts and flushes for region at 1731668548934Disabling writes for close at 1731668548934Writing region close event to WAL at 1731668548935 (+1 ms)Running coprocessor post-close hooks at 1731668548940 (+5 ms)Closed at 1731668548940 2024-11-15T11:02:28,941 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd. 
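The records from HMaster$13(2820) onward show the client disabling testtb-testExportFileSystemState, the resulting procedure tree unassigning and closing both regions, and, further down, the table being deleted. A minimal, hedged sketch of that client-side sequence using the public Admin API (connection details and the table name are placeholders, and this is not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableDeleteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("testtb-example"); // placeholder name
            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table); // drives a DisableTableProcedure on the master
                }
                admin.deleteTable(table);      // drives a DeleteTableProcedure, as in the log
            }
        }
    }
}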
2024-11-15T11:02:28,941 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 9afba3da2cf2b077a9aa1ea57175b6dd: Waiting for close lock at 1731668548934Running coprocessor pre-close hooks at 1731668548934Disabling compacts and flushes for region at 1731668548934Disabling writes for close at 1731668548934Writing region close event to WAL at 1731668548935 (+1 ms)Running coprocessor post-close hooks at 1731668548940 (+5 ms)Closed at 1731668548941 (+1 ms) 2024-11-15T11:02:28,943 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:28,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=a4ee56379a6de75f1ea4f4680844f2bc, regionState=CLOSED 2024-11-15T11:02:28,943 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:28,944 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=9afba3da2cf2b077a9aa1ea57175b6dd, regionState=CLOSED 2024-11-15T11:02:28,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:02:28,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:02:28,948 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-15T11:02:28,948 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure a4ee56379a6de75f1ea4f4680844f2bc, server=4cdab6d79b20,36113,1731668474097 in 165 msec 2024-11-15T11:02:28,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-15T11:02:28,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a4ee56379a6de75f1ea4f4680844f2bc, UNASSIGN in 171 msec 2024-11-15T11:02:28,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 9afba3da2cf2b077a9aa1ea57175b6dd, server=4cdab6d79b20,39207,1731668474152 in 166 msec 2024-11-15T11:02:28,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=70 2024-11-15T11:02:28,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9afba3da2cf2b077a9aa1ea57175b6dd, UNASSIGN in 173 msec 2024-11-15T11:02:28,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-15T11:02:28,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 177 msec 2024-11-15T11:02:28,956 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668548955"}]},"ts":"1731668548955"} 2024-11-15T11:02:28,957 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-15T11:02:28,957 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-15T11:02:28,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 189 msec 2024-11-15T11:02:29,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T11:02:29,086 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T11:02:29,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-15T11:02:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,088 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-15T11:02:29,095 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-15T11:02:29,096 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, 
quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T11:02:29,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T11:02:29,102 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:29,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-11-15T11:02:29,102 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:29,102 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:02:29,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-11-15T11:02:29,102 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:02:29,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T11:02:29,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-15T11:02:29,106 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/recovered.edits] 2024-11-15T11:02:29,107 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/recovered.edits] 2024-11-15T11:02:29,115 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/cf/d65727f7132e45d9ba3516fb38b44a7c 2024-11-15T11:02:29,116 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/cf/7804902d477649c581813e0a3a0595e0 2024-11-15T11:02:29,119 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd/recovered.edits/9.seqid 2024-11-15T11:02:29,120 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/9afba3da2cf2b077a9aa1ea57175b6dd 2024-11-15T11:02:29,120 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc/recovered.edits/9.seqid 2024-11-15T11:02:29,121 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemState/a4ee56379a6de75f1ea4f4680844f2bc 2024-11-15T11:02:29,121 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-15T11:02:29,123 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions 
from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,126 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-15T11:02:29,129 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-15T11:02:29,130 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,130 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-15T11:02:29,130 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668549130"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:29,130 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668549130"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:29,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:02:29,133 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a4ee56379a6de75f1ea4f4680844f2bc, NAME => 'testtb-testExportFileSystemState,,1731668531171.a4ee56379a6de75f1ea4f4680844f2bc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9afba3da2cf2b077a9aa1ea57175b6dd, NAME => 'testtb-testExportFileSystemState,1,1731668531171.9afba3da2cf2b077a9aa1ea57175b6dd.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:02:29,133 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
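The HFileArchiver(596)/(610) records above move each region's store files and recovered.edits under an archive/ mirror of the data/ layout before the region directory is removed. As a simplified sketch of that move-preserving-layout idea (not the HFileArchiver implementation; the paths are invented), using FileSystem.rename on the same filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
    // Moves a file from the live data tree to the same relative spot under archiveRoot.
    static boolean archive(FileSystem fs, Path dataRoot, Path archiveRoot, Path file) throws Exception {
        String relative = file.toUri().getPath().substring(dataRoot.toUri().getPath().length());
        Path target = new Path(archiveRoot + relative);
        fs.mkdirs(target.getParent());   // make sure the mirrored directory exists
        return fs.rename(file, target);  // a rename, not a copy, within one filesystem
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // hypothetical namenode
        try (FileSystem fs = FileSystem.get(conf)) {
            Path data = new Path("/hbase/data/default/example-table");
            Path archive = new Path("/hbase/archive/data/default/example-table");
            Path hfile = new Path(data, "region-1/cf/hfile-1"); // invented file for the sketch
            System.out.println("archived: " + archive(fs, data, archive, hfile));
        }
    }
}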
2024-11-15T11:02:29,133 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668549133"}]},"ts":"9223372036854775807"} 2024-11-15T11:02:29,135 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-15T11:02:29,135 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T11:02:29,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 49 msec 2024-11-15T11:02:29,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-15T11:02:29,216 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-15T11:02:29,216 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T11:02:29,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-15T11:02:29,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-15T11:02:29,228 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-15T11:02:29,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-15T11:02:29,254 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=786 (was 795), OpenFileDescriptor=795 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=569 (was 616), ProcessCount=20 (was 22), AvailableMemoryMB=1214 (was 1260) 2024-11-15T11:02:29,254 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T11:02:29,273 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=786, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=569, ProcessCount=19, AvailableMemoryMB=1215 2024-11-15T11:02:29,273 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T11:02:29,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:02:29,276 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:02:29,278 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:02:29,278 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:29,278 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-15T11:02:29,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T11:02:29,280 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:02:29,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742003_1179 (size=404) 2024-11-15T11:02:29,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742003_1179 (size=404) 2024-11-15T11:02:29,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742003_1179 (size=404) 2024-11-15T11:02:29,293 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ce802a4ff94d9f00553d85d08c2eb68f, NAME => 'testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:29,293 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 793eac0e5bfab61ca7fe5e8329a9314a, NAME => 'testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:29,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742005_1181 (size=65) 2024-11-15T11:02:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742005_1181 (size=65) 2024-11-15T11:02:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742004_1180 (size=65) 2024-11-15T11:02:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742004_1180 (size=65) 2024-11-15T11:02:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742005_1181 (size=65) 2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 793eac0e5bfab61ca7fe5e8329a9314a, disabling compactions & flushes 2024-11-15T11:02:29,303 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. after waiting 0 ms 2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:29,303 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 
2024-11-15T11:02:29,303 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 793eac0e5bfab61ca7fe5e8329a9314a: Waiting for close lock at 1731668549303Disabling compacts and flushes for region at 1731668549303Disabling writes for close at 1731668549303Writing region close event to WAL at 1731668549303Closed at 1731668549303 2024-11-15T11:02:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742004_1180 (size=65) 2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing ce802a4ff94d9f00553d85d08c2eb68f, disabling compactions & flushes 2024-11-15T11:02:29,304 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. after waiting 0 ms 2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,304 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 
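The descriptor echoed in the create request and again in the HRegion(7572) 'creating' records above corresponds to a TableDescriptor assembled on the client. A sketch of a createTable call that would produce the same layout, one 'cf' family and two regions split at '1' (standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; not the test's literal code):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateConsecutiveExportsTable {
  static void create(Admin admin) throws IOException {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)                              // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                                // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)                              // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    // A single split key yields the two regions logged above: ('', '1') and ('1', '').
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}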
2024-11-15T11:02:29,304 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for ce802a4ff94d9f00553d85d08c2eb68f: Waiting for close lock at 1731668549304Disabling compacts and flushes for region at 1731668549304Disabling writes for close at 1731668549304Writing region close event to WAL at 1731668549304Closed at 1731668549304 2024-11-15T11:02:29,306 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:02:29,306 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668549306"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668549306"}]},"ts":"1731668549306"} 2024-11-15T11:02:29,306 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668549306"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668549306"}]},"ts":"1731668549306"} 2024-11-15T11:02:29,309 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:02:29,310 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:02:29,310 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668549310"}]},"ts":"1731668549310"} 2024-11-15T11:02:29,312 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-15T11:02:29,313 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:02:29,314 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:02:29,314 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:02:29,314 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:02:29,314 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:02:29,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, ASSIGN}] 2024-11-15T11:02:29,315 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, ASSIGN 2024-11-15T11:02:29,316 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, ASSIGN 2024-11-15T11:02:29,316 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:02:29,317 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:02:29,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T11:02:29,467 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:02:29,468 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=793eac0e5bfab61ca7fe5e8329a9314a, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:29,468 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=ce802a4ff94d9f00553d85d08c2eb68f, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:02:29,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, ASSIGN because future has completed 2024-11-15T11:02:29,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:02:29,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, ASSIGN because future has completed 2024-11-15T11:02:29,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:02:29,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T11:02:29,627 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:29,627 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,627 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 793eac0e5bfab61ca7fe5e8329a9314a, NAME => 'testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:02:29,627 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => ce802a4ff94d9f00553d85d08c2eb68f, NAME => 'testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. service=AccessControlService 2024-11-15T11:02:29,628 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
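Each region open around this point registers AccessControlService and loads the AccessController system coprocessor (the CoprocessorHost(174) and 'Registered coprocessor service' lines in this and the following records). How the secure mini-cluster wires that up is not shown in this log; a typical configuration sketch using the standard coprocessor and authorization properties (an assumption about the setup, not the test's exact code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SecureClusterConf {
  static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    // Load the AccessController on master, regions and region servers so every
    // opened region exposes AccessControlService, as logged above.
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}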
2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. service=AccessControlService 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:29,628 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,628 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:02:29,629 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,629 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,630 INFO [StoreOpener-793eac0e5bfab61ca7fe5e8329a9314a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,630 INFO [StoreOpener-ce802a4ff94d9f00553d85d08c2eb68f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,632 INFO [StoreOpener-ce802a4ff94d9f00553d85d08c2eb68f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce802a4ff94d9f00553d85d08c2eb68f columnFamilyName cf 2024-11-15T11:02:29,632 INFO [StoreOpener-793eac0e5bfab61ca7fe5e8329a9314a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 793eac0e5bfab61ca7fe5e8329a9314a columnFamilyName cf 2024-11-15T11:02:29,632 DEBUG [StoreOpener-793eac0e5bfab61ca7fe5e8329a9314a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:29,632 DEBUG [StoreOpener-ce802a4ff94d9f00553d85d08c2eb68f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:02:29,632 INFO [StoreOpener-ce802a4ff94d9f00553d85d08c2eb68f-1 {}] regionserver.HStore(327): Store=ce802a4ff94d9f00553d85d08c2eb68f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:02:29,632 INFO [StoreOpener-793eac0e5bfab61ca7fe5e8329a9314a-1 {}] regionserver.HStore(327): Store=793eac0e5bfab61ca7fe5e8329a9314a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:02:29,632 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,632 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,633 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,633 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,634 DEBUG 
[RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,634 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,634 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,634 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,634 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,634 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,636 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,636 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,638 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:02:29,639 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 793eac0e5bfab61ca7fe5e8329a9314a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62318293, jitterRate=-0.07138507068157196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:02:29,639 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,639 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:02:29,640 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 793eac0e5bfab61ca7fe5e8329a9314a: Running coprocessor pre-open hook at 1731668549628Writing region info on filesystem at 1731668549628Initializing all the Stores at 
1731668549629 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668549629Cleaning up temporary data from old regions at 1731668549634 (+5 ms)Running coprocessor post-open hooks at 1731668549639 (+5 ms)Region opened successfully at 1731668549640 (+1 ms) 2024-11-15T11:02:29,640 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened ce802a4ff94d9f00553d85d08c2eb68f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69129222, jitterRate=0.030105680227279663}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:02:29,640 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,640 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for ce802a4ff94d9f00553d85d08c2eb68f: Running coprocessor pre-open hook at 1731668549629Writing region info on filesystem at 1731668549629Initializing all the Stores at 1731668549630 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668549630Cleaning up temporary data from old regions at 1731668549634 (+4 ms)Running coprocessor post-open hooks at 1731668549640 (+6 ms)Region opened successfully at 1731668549640 2024-11-15T11:02:29,641 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., pid=80, masterSystemTime=1731668549623 2024-11-15T11:02:29,641 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f., pid=79, masterSystemTime=1731668549622 2024-11-15T11:02:29,643 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:29,643 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 
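With both regions opened and about to be marked OPEN in hbase:meta just below, the test then waits until the table is fully assigned (the 'Waiting until all regions ... get assigned' records further down). A small client-side check in the same spirit, assuming the standard Admin/RegionLocator API (the printout is illustrative, not the test's code):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionAssignmentCheck {
  static void check(Connection conn) throws IOException {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      if (!admin.isTableAvailable(table)) {
        throw new IllegalStateException("table not yet available");
      }
      // Both regions should report a hosting server once the OpenRegionProcedures finish.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}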
2024-11-15T11:02:29,643 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=793eac0e5bfab61ca7fe5e8329a9314a, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:02:29,644 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,644 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:29,645 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=ce802a4ff94d9f00553d85d08c2eb68f, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:02:29,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:02:29,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:02:29,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=78 2024-11-15T11:02:29,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152 in 176 msec 2024-11-15T11:02:29,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=77 2024-11-15T11:02:29,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964 in 177 msec 2024-11-15T11:02:29,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, ASSIGN in 336 msec 2024-11-15T11:02:29,652 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-15T11:02:29,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, ASSIGN in 336 msec 2024-11-15T11:02:29,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:02:29,660 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668549660"}]},"ts":"1731668549660"} 2024-11-15T11:02:29,662 INFO [PEWorker-3 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-15T11:02:29,664 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:02:29,664 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-15T11:02:29,668 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T11:02:29,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:02:29,676 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:29,677 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:29,677 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:29,677 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T11:02:29,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 401 msec 2024-11-15T11:02:29,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T11:02:29,906 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T11:02:29,906 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): 
Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-15T11:02:29,907 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:29,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-15T11:02:29,911 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:29,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-11-15T11:02:29,912 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T11:02:29,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T11:02:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668549915 (current time:1731668549915). 2024-11-15T11:02:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:02:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-15T11:02:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:02:29,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d28b7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:29,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:29,918 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:29,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:29,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:29,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@421212c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:29,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:29,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,920 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:29,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@396ef629, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:29,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:29,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:29,923 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:29,925 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:02:29,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:29,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,926 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5be46ba6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:29,929 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:29,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:29,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:29,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ddc4718, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:29,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:29,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,932 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54834, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:29,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41ba8416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:29,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:29,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:29,937 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:29,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:29,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
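While validating the snapshot request, the master opens a short-lived connection to hbase:acl and reads the table's permissions (the "Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]" record below) so they can be written into the snapshot description. That RWXCA entry for 'jenkins' was written automatically when the table was created (the PermissionStorage(177) record above); an equivalent explicit grant through the public client API would look roughly like this (the overload and action list are my assumption of the usual form):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTablePermissions {
  static void grantAll(Connection conn) throws Throwable {
    // Writes an hbase:acl entry equivalent to "jenkins: RWXCA" for the whole table
    // (null family/qualifier means the grant applies to the entire table).
    AccessControlClient.grant(conn, TableName.valueOf("testtb-testConsecutiveExports"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}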
2024-11-15T11:02:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T11:02:29,941 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:29,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
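The SnapshotManager has confirmed no snapshot with this name exists and is about to store SnapshotProcedure pid=81 for the FLUSH-type snapshot named in the request above. On the client side this whole exchange corresponds to a single blocking Admin call; a minimal sketch (standard Admin.snapshot overload; the test may go through its own helper):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class TakeEmptySnapshot {
  static void snapshot(Admin admin) throws Exception {
    // Blocks until the SnapshotProcedure (pid=81 below) finishes; on an enabled
    // table this produces the FLUSH-type snapshot requested above.
    admin.snapshot("emptySnaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"));
  }
}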
2024-11-15T11:02:29,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T11:02:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-15T11:02:29,945 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:02:29,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T11:02:29,946 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:02:29,949 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:02:29,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742006_1182 (size=161) 2024-11-15T11:02:29,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742006_1182 (size=161) 2024-11-15T11:02:29,970 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:02:29,970 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a}] 2024-11-15T11:02:29,971 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:29,971 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:29,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742006_1182 (size=161) 2024-11-15T11:02:30,055 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T11:02:30,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-15T11:02:30,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:30,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 793eac0e5bfab61ca7fe5e8329a9314a: 2024-11-15T11:02:30,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-15T11:02:30,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. for emptySnaptb0-testConsecutiveExports completed. 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for ce802a4ff94d9f00553d85d08c2eb68f: 2024-11-15T11:02:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. for emptySnaptb0-testConsecutiveExports completed. 2024-11-15T11:02:30,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-15T11:02:30,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:30,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:02:30,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742007_1183 (size=68) 2024-11-15T11:02:30,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742007_1183 (size=68) 2024-11-15T11:02:30,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742007_1183 (size=68) 2024-11-15T11:02:30,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742008_1184 (size=68) 2024-11-15T11:02:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742008_1184 (size=68) 2024-11-15T11:02:30,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742008_1184 (size=68) 2024-11-15T11:02:30,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:30,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-15T11:02:30,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 
2024-11-15T11:02:30,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-15T11:02:30,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-15T11:02:30,152 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:30,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-15T11:02:30,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:30,152 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:30,152 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:30,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a in 183 msec 2024-11-15T11:02:30,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=82, resume processing ppid=81 2024-11-15T11:02:30,157 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:02:30,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f in 183 msec 2024-11-15T11:02:30,162 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:02:30,163 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:02:30,163 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-15T11:02:30,165 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-15T11:02:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to 
blk_1073742009_1185 (size=543) 2024-11-15T11:02:30,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742009_1185 (size=543) 2024-11-15T11:02:30,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742009_1185 (size=543) 2024-11-15T11:02:30,198 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:02:30,207 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:02:30,208 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-15T11:02:30,210 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:02:30,210 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-15T11:02:30,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 267 msec 2024-11-15T11:02:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T11:02:30,267 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T11:02:30,274 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='087f7d4e9f3435284f62ae796bd0bc2d5', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:02:30,275 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1c65e83889ceeaaf2be7df1ff742329f2', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,278 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3235b6388a7de3936706b585561b6b6ec', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,279 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='2c33424725694990dee358f08b9a8fbd7', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,280 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='41cb7c2a53eb66b438f8389eb6e4811b8', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,282 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='698f344586fad76d6d60e0c9ad8db6804', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:02:30,283 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='5ba60997a4dadd6969698c608245fb08b', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,284 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='d8ea54b0d3c8a755c731d4f6e2d46c6d', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:02:30,293 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T11:02:30,297 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-15T11:02:30,297 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 
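
The locator traces above show the client resolving each test row of 'testtb-testConsecutiveExports' to one of the table's two regions before data is written. A minimal sketch of performing the same lookup with the public client API, assuming a reachable cluster and an hbase-site.xml on the classpath; only the table name is taken from the log:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class LocateRegions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
          // One HRegionLocation per region; the test output above reports two regions for this table.
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }
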
2024-11-15T11:02:30,297 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:02:30,299 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T11:02:30,306 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T11:02:30,313 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T11:02:30,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T11:02:30,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668550316 (current time:1731668550316). 2024-11-15T11:02:30,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:02:30,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-15T11:02:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:02:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25c2c19a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:30,319 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:30,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:30,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:30,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391df7e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
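
The request logged above, { ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, reaches MasterRpcServices from a client-side Admin call. A hedged sketch of how such a flush-type snapshot can be requested; the snapshot and table names are the ones in the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: each region flushes its memstore before the manifest
          // references are written (the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS phase in the log).
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"),
              SnapshotType.FLUSH));
        }
      }
    }

For an enabled table, the shorter overload admin.snapshot(name, tableName) defaults to the same flush behaviour.
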
2024-11-15T11:02:30,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:30,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:30,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,322 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54854, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:30,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@364af524, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:30,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:30,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:30,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:30,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
2024-11-15T11:02:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,327 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47ff3278, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:02:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:02:30,328 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9cd744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:02:30,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,330 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:02:30,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@656ee202, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:02:30,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:02:30,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:02:30,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:02:30,334 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:02:30,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:02:30,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
2024-11-15T11:02:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:02:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:02:30,337 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:02:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T11:02:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
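
Just before the procedure is stored, the master reads the table's ACL entry (entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]) so the permissions can be carried in the snapshot description. The same permissions can be read from the client side; a rough sketch, assuming the AccessController coprocessor is enabled as it is in this test cluster:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTablePermissions {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Lists the hbase:acl entries for the table; the log above shows 'jenkins' holding RWXCA.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
          perms.forEach(System.out::println);
        }
      }
    }
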
2024-11-15T11:02:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T11:02:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-15T11:02:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T11:02:30,341 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:02:30,342 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:02:30,345 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:02:30,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742010_1186 (size=156) 2024-11-15T11:02:30,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742010_1186 (size=156) 2024-11-15T11:02:30,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742010_1186 (size=156) 2024-11-15T11:02:30,355 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:02:30,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a}] 2024-11-15T11:02:30,357 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:30,357 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T11:02:30,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-15T11:02:30,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-15T11:02:30,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:02:30,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:02:30,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing ce802a4ff94d9f00553d85d08c2eb68f 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-15T11:02:30,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 793eac0e5bfab61ca7fe5e8329a9314a 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-15T11:02:30,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/.tmp/cf/b6792af580854f6382f2b1b1104a1b0a is 71, key is 017f6caf3e6d95178f5f53a141a170a6/cf:q/1731668550282/Put/seqid=0 2024-11-15T11:02:30,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/.tmp/cf/83eb3ebf9e8f4173bec9cf92b75426c6 is 71, key is 12dc39c729e43f1886768602fdcfaf3b/cf:q/1731668550286/Put/seqid=0 2024-11-15T11:02:30,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742011_1187 (size=5422) 2024-11-15T11:02:30,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742011_1187 (size=5422) 2024-11-15T11:02:30,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742011_1187 (size=5422) 2024-11-15T11:02:30,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/.tmp/cf/b6792af580854f6382f2b1b1104a1b0a 2024-11-15T11:02:30,567 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742012_1188 (size=8188) 2024-11-15T11:02:30,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742012_1188 (size=8188) 2024-11-15T11:02:30,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742012_1188 (size=8188) 2024-11-15T11:02:30,568 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/.tmp/cf/83eb3ebf9e8f4173bec9cf92b75426c6 2024-11-15T11:02:30,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/.tmp/cf/b6792af580854f6382f2b1b1104a1b0a as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a 2024-11-15T11:02:30,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/.tmp/cf/83eb3ebf9e8f4173bec9cf92b75426c6 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6 2024-11-15T11:02:30,586 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6, entries=45, sequenceid=6, filesize=8.0 K 2024-11-15T11:02:30,586 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a, entries=5, sequenceid=6, filesize=5.3 K 2024-11-15T11:02:30,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 793eac0e5bfab61ca7fe5e8329a9314a in 78ms, sequenceid=6, compaction requested=false 2024-11-15T11:02:30,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-15T11:02:30,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 793eac0e5bfab61ca7fe5e8329a9314a: 2024-11-15T11:02:30,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. for snaptb0-testConsecutiveExports completed. 2024-11-15T11:02:30,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-15T11:02:30,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:30,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6] hfiles 2024-11-15T11:02:30,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6 for snapshot=snaptb0-testConsecutiveExports 2024-11-15T11:02:30,594 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for ce802a4ff94d9f00553d85d08c2eb68f in 85ms, sequenceid=6, compaction requested=false 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for ce802a4ff94d9f00553d85d08c2eb68f: 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. for snaptb0-testConsecutiveExports completed. 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a] hfiles 2024-11-15T11:02:30,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a for snapshot=snaptb0-testConsecutiveExports 2024-11-15T11:02:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742013_1189 (size=107) 2024-11-15T11:02:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742013_1189 (size=107) 2024-11-15T11:02:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742013_1189 (size=107) 2024-11-15T11:02:30,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 
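
The entries above record each region flushing its memstore to a new store file (b6792af580854f6382f2b1b1104a1b0a and 83eb3ebf9e8f4173bec9cf92b75426c6) and then adding a manifest reference to that file, which is why this snapshot, unlike the empty one taken earlier, carries hfile references. For a FLUSH-type snapshot the flush is driven by the region procedure itself, but a flush can also be issued explicitly; a small sketch using only the table name from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks every region of the table to write its memstore out as hfiles.
          admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }
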
2024-11-15T11:02:30,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-15T11:02:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-15T11:02:30,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:30,608 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:02:30,612 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a in 255 msec 2024-11-15T11:02:30,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742014_1190 (size=107) 2024-11-15T11:02:30,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742014_1190 (size=107) 2024-11-15T11:02:30,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742014_1190 (size=107) 2024-11-15T11:02:30,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 
2024-11-15T11:02:30,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-15T11:02:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-15T11:02:30,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:30,617 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:02:30,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-15T11:02:30,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f in 263 msec 2024-11-15T11:02:30,621 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:02:30,622 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:02:30,623 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:02:30,623 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-15T11:02:30,625 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T11:02:30,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742015_1191 (size=621) 2024-11-15T11:02:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742015_1191 (size=621) 2024-11-15T11:02:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742015_1191 (size=621) 2024-11-15T11:02:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T11:02:30,655 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports 
table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:02:30,674 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:02:30,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T11:02:30,681 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:02:30,682 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-15T11:02:30,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 344 msec 2024-11-15T11:02:30,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T11:02:30,965 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T11:02:30,966 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965 2024-11-15T11:02:30,966 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:31,002 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:31,002 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@3cf44f9e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T11:02:31,004 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:02:31,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T11:02:31,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:31,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:31,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-10744363644312259246.jar 2024-11-15T11:02:32,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-4383883013032121347.jar 2024-11-15T11:02:32,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:32,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:02:32,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:02:32,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:02:32,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:02:32,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:02:32,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:02:32,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:02:32,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
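
By this point the test has picked a local destination (the local-export-1731668550965 directory logged above), verified and copied the snapshot manifest, and is letting TableMapReduceUtil resolve the jars the export MapReduce job will ship. A hedged sketch of driving a comparable export programmatically; the -snapshot and -copy-to options mirror values in the log, while running ExportSnapshot through ToolRunner and the target path here are assumptions for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportToLocalFs {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and the referenced hfiles from the source
        // cluster's root directory to the target file system, here a local path.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export" // hypothetical target, not the test's temp dir
        });
        System.exit(rc);
      }
    }

The equivalent command-line form is hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>.
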
2024-11-15T11:02:32,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:02:32,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:02:32,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:02:32,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:32,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:32,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:32,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:32,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:32,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:32,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742016_1192 (size=131440) 2024-11-15T11:02:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742016_1192 (size=131440) 2024-11-15T11:02:32,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is 
added to blk_1073742016_1192 (size=131440) 2024-11-15T11:02:32,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T11:02:32,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T11:02:32,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T11:02:32,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T11:02:32,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T11:02:32,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T11:02:32,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742019_1195 (size=903737) 2024-11-15T11:02:32,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742019_1195 (size=903737) 2024-11-15T11:02:32,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742019_1195 (size=903737) 2024-11-15T11:02:32,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T11:02:32,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T11:02:32,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T11:02:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T11:02:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T11:02:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T11:02:32,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742022_1198 (size=77835) 2024-11-15T11:02:32,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742022_1198 (size=77835) 2024-11-15T11:02:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742022_1198 (size=77835) 2024-11-15T11:02:32,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742023_1199 (size=30949) 2024-11-15T11:02:32,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38361 is added to blk_1073742023_1199 (size=30949) 2024-11-15T11:02:32,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742023_1199 (size=30949) 2024-11-15T11:02:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742024_1200 (size=1597327) 2024-11-15T11:02:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742024_1200 (size=1597327) 2024-11-15T11:02:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742024_1200 (size=1597327) 2024-11-15T11:02:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742025_1201 (size=4695811) 2024-11-15T11:02:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742025_1201 (size=4695811) 2024-11-15T11:02:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742025_1201 (size=4695811) 2024-11-15T11:02:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742026_1202 (size=232957) 2024-11-15T11:02:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742026_1202 (size=232957) 2024-11-15T11:02:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742026_1202 (size=232957) 2024-11-15T11:02:32,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742027_1203 (size=127628) 2024-11-15T11:02:32,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742027_1203 (size=127628) 2024-11-15T11:02:32,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742027_1203 (size=127628) 2024-11-15T11:02:32,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742028_1204 (size=20406) 2024-11-15T11:02:32,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742028_1204 (size=20406) 2024-11-15T11:02:32,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742028_1204 (size=20406) 2024-11-15T11:02:32,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742029_1205 (size=5175431) 2024-11-15T11:02:32,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742029_1205 (size=5175431) 2024-11-15T11:02:32,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742029_1205 (size=5175431) 2024-11-15T11:02:32,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42399 is added to blk_1073742030_1206 (size=217634) 2024-11-15T11:02:32,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742030_1206 (size=217634) 2024-11-15T11:02:32,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742030_1206 (size=217634) 2024-11-15T11:02:32,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742031_1207 (size=6424745) 2024-11-15T11:02:32,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742031_1207 (size=6424745) 2024-11-15T11:02:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742031_1207 (size=6424745) 2024-11-15T11:02:32,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742032_1208 (size=440656) 2024-11-15T11:02:32,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742032_1208 (size=440656) 2024-11-15T11:02:32,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742032_1208 (size=440656) 2024-11-15T11:02:32,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742033_1209 (size=1832290) 2024-11-15T11:02:32,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742033_1209 (size=1832290) 2024-11-15T11:02:32,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742033_1209 (size=1832290) 2024-11-15T11:02:32,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742034_1210 (size=322274) 2024-11-15T11:02:32,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742034_1210 (size=322274) 2024-11-15T11:02:32,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742034_1210 (size=322274) 2024-11-15T11:02:32,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742035_1211 (size=503880) 2024-11-15T11:02:32,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742035_1211 (size=503880) 2024-11-15T11:02:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742035_1211 (size=503880) 2024-11-15T11:02:32,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742036_1212 (size=29229) 2024-11-15T11:02:32,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742036_1212 (size=29229) 2024-11-15T11:02:32,725 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742036_1212 (size=29229) 2024-11-15T11:02:32,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742037_1213 (size=24096) 2024-11-15T11:02:32,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742037_1213 (size=24096) 2024-11-15T11:02:32,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742037_1213 (size=24096) 2024-11-15T11:02:32,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742038_1214 (size=111872) 2024-11-15T11:02:32,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742038_1214 (size=111872) 2024-11-15T11:02:32,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742038_1214 (size=111872) 2024-11-15T11:02:32,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742039_1215 (size=45609) 2024-11-15T11:02:32,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742039_1215 (size=45609) 2024-11-15T11:02:32,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742039_1215 (size=45609) 2024-11-15T11:02:32,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742040_1216 (size=136454) 2024-11-15T11:02:32,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742040_1216 (size=136454) 2024-11-15T11:02:32,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742040_1216 (size=136454) 2024-11-15T11:02:32,835 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
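[editor's note] The JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") is the standard MapReduce hint that the driver never declared which jar carries its user classes; in this test that is expected, since the dependency jars are staged separately. A minimal hedged sketch of how an ordinary driver would address it; MyDriver is a placeholder name, not a class from this test.

// Hedged sketch: declaring the job jar so the "No job jar file set"
// warning above does not appear. MyDriver is a placeholder class.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class MyDriver {
    public static Job newJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "example");
        // Either point at the jar containing the driver class...
        job.setJarByClass(MyDriver.class);
        // ...or name the jar explicitly: job.setJar("/path/to/job.jar");
        return job;
    }
}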
2024-11-15T11:02:32,838 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-15T11:02:32,840 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:02:32,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742041_1217 (size=338) 2024-11-15T11:02:32,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742041_1217 (size=338) 2024-11-15T11:02:32,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742041_1217 (size=338) 2024-11-15T11:02:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742042_1218 (size=15) 2024-11-15T11:02:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742042_1218 (size=15) 2024-11-15T11:02:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742042_1218 (size=15) 2024-11-15T11:02:33,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742043_1219 (size=303779) 2024-11-15T11:02:33,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742043_1219 (size=303779) 2024-11-15T11:02:33,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742043_1219 (size=303779) 2024-11-15T11:02:33,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-15T11:02:33,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-15T11:02:33,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-15T11:02:33,777 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:02:33,777 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:02:33,779 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0003_000001 (auth:SIMPLE) from 127.0.0.1:49002 2024-11-15T11:02:33,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000001/launch_container.sh] 2024-11-15T11:02:33,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000001/container_tokens] 2024-11-15T11:02:33,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0003/container_1731668480794_0003_01_000001/sysfs] 2024-11-15T11:02:34,133 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0004_000001 (auth:SIMPLE) from 127.0.0.1:39662 2024-11-15T11:02:34,466 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:02:39,484 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0004_000001 (auth:SIMPLE) from 127.0.0.1:51276 2024-11-15T11:02:39,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742044_1220 (size=349429) 2024-11-15T11:02:39,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742044_1220 (size=349429) 2024-11-15T11:02:39,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742044_1220 (size=349429) 2024-11-15T11:02:41,796 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0004_000001 (auth:SIMPLE) from 127.0.0.1:57410 2024-11-15T11:02:41,949 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
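[editor's note] The FsDatasetAsyncDiskServiceFixer DEBUG entry above reports a reflective lookup of a "threadGroup" field that no longer exists on newer Hadoop releases, so the workaround is skipped (see HBASE-27595). The sketch below illustrates only that probe-and-skip reflection pattern; java.lang.Thread is used as a stand-in target purely so the example is self-contained, and is not the class the real fixer probes.

// Illustrative sketch of the probe-and-skip pattern behind the
// "NoSuchFieldException: threadGroup" message above.
import java.lang.reflect.Field;

public class FieldProbe {
    public static void main(String[] args) {
        try {
            Field f = Thread.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("field found, applying workaround: " + f);
        } catch (NoSuchFieldException e) {
            // The field is absent on this runtime, so the workaround is skipped,
            // exactly as the DEBUG log line reports.
            System.out.println("threadGroup not present; skipping (see HBASE-27595)");
        }
    }
}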
2024-11-15T11:02:46,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742045_1221 (size=17447) 2024-11-15T11:02:46,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742045_1221 (size=17447) 2024-11-15T11:02:46,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742045_1221 (size=17447) 2024-11-15T11:02:46,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742046_1222 (size=462) 2024-11-15T11:02:46,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742046_1222 (size=462) 2024-11-15T11:02:46,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742046_1222 (size=462) 2024-11-15T11:02:46,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742047_1223 (size=17447) 2024-11-15T11:02:46,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742047_1223 (size=17447) 2024-11-15T11:02:46,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742047_1223 (size=17447) 2024-11-15T11:02:46,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742048_1224 (size=349429) 2024-11-15T11:02:46,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742048_1224 (size=349429) 2024-11-15T11:02:46,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742048_1224 (size=349429) 2024-11-15T11:02:46,688 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0004_000001 (auth:SIMPLE) from 127.0.0.1:57426 2024-11-15T11:02:48,448 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:02:48,448 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
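[editor's note] The ExportSnapshot entries above trace one complete export pass: verify the source snapshot, copy the manifest, load the hfile list, run the copy job, finalize, then verify the exported copy. A hedged sketch of driving the same tool programmatically follows; the snapshot name is taken from this log, but the destination URI is a placeholder and running it outside the test harness is an assumption.

// Hedged sketch, not the test's own code: invoking the ExportSnapshot tool
// for the snapshot named in this log. The destination URI is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/snapshot-export"  // placeholder destination
        });
        System.exit(rc);
    }
}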
2024-11-15T11:02:48,452 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-15T11:02:48,452 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:02:48,452 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:02:48,452 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T11:02:48,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T11:02:48,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T11:02:48,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@3cf44f9e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T11:02:48,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T11:02:48,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T11:02:48,455 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:48,485 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:02:48,485 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@3cf44f9e, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T11:02:48,488 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:02:48,492 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T11:02:48,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:48,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:48,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,803 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-326514900104262332.jar 2024-11-15T11:02:49,803 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,803 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,879 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-6445976991198579815.jar 2024-11-15T11:02:49,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,881 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,881 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,881 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:02:49,882 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:02:49,882 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:02:49,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:02:49,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:02:49,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:02:49,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:02:49,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:02:49,884 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:02:49,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:02:49,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:02:49,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:02:49,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:49,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:49,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:49,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:49,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:02:49,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:49,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:02:49,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742049_1225 (size=131440) 2024-11-15T11:02:49,977 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742049_1225 (size=131440) 2024-11-15T11:02:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742049_1225 (size=131440) 2024-11-15T11:02:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T11:02:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T11:02:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T11:02:50,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T11:02:50,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T11:02:50,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T11:02:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742052_1228 (size=903737) 2024-11-15T11:02:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742052_1228 (size=903737) 2024-11-15T11:02:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742052_1228 (size=903737) 2024-11-15T11:02:50,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742053_1229 (size=8360083) 2024-11-15T11:02:50,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742053_1229 (size=8360083) 2024-11-15T11:02:50,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742053_1229 (size=8360083) 2024-11-15T11:02:50,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742054_1230 (size=1877034) 2024-11-15T11:02:50,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742054_1230 (size=1877034) 2024-11-15T11:02:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742054_1230 (size=1877034) 2024-11-15T11:02:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742055_1231 (size=77835) 2024-11-15T11:02:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742055_1231 (size=77835) 2024-11-15T11:02:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742055_1231 (size=77835) 2024-11-15T11:02:50,139 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742056_1232 (size=30949) 2024-11-15T11:02:50,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742056_1232 (size=30949) 2024-11-15T11:02:50,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742056_1232 (size=30949) 2024-11-15T11:02:50,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742057_1233 (size=1597327) 2024-11-15T11:02:50,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742057_1233 (size=1597327) 2024-11-15T11:02:50,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742057_1233 (size=1597327) 2024-11-15T11:02:50,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742058_1234 (size=4695811) 2024-11-15T11:02:50,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742058_1234 (size=4695811) 2024-11-15T11:02:50,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742058_1234 (size=4695811) 2024-11-15T11:02:50,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742059_1235 (size=232957) 2024-11-15T11:02:50,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742059_1235 (size=232957) 2024-11-15T11:02:50,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742059_1235 (size=232957) 2024-11-15T11:02:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742060_1236 (size=127628) 2024-11-15T11:02:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742060_1236 (size=127628) 2024-11-15T11:02:50,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742060_1236 (size=127628) 2024-11-15T11:02:50,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742061_1237 (size=20406) 2024-11-15T11:02:50,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742061_1237 (size=20406) 2024-11-15T11:02:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742061_1237 (size=20406) 2024-11-15T11:02:50,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742062_1238 (size=5175431) 2024-11-15T11:02:50,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742062_1238 (size=5175431) 2024-11-15T11:02:50,301 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742062_1238 (size=5175431) 2024-11-15T11:02:50,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742063_1239 (size=217634) 2024-11-15T11:02:50,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742063_1239 (size=217634) 2024-11-15T11:02:50,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742063_1239 (size=217634) 2024-11-15T11:02:50,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742064_1240 (size=6424745) 2024-11-15T11:02:50,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742064_1240 (size=6424745) 2024-11-15T11:02:50,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742064_1240 (size=6424745) 2024-11-15T11:02:50,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742065_1241 (size=1832290) 2024-11-15T11:02:50,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742065_1241 (size=1832290) 2024-11-15T11:02:50,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742065_1241 (size=1832290) 2024-11-15T11:02:50,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742066_1242 (size=322274) 2024-11-15T11:02:50,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742066_1242 (size=322274) 2024-11-15T11:02:50,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742066_1242 (size=322274) 2024-11-15T11:02:50,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742067_1243 (size=503880) 2024-11-15T11:02:50,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742067_1243 (size=503880) 2024-11-15T11:02:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742067_1243 (size=503880) 2024-11-15T11:02:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742068_1244 (size=440656) 2024-11-15T11:02:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742068_1244 (size=440656) 2024-11-15T11:02:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742068_1244 (size=440656) 2024-11-15T11:02:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742069_1245 (size=29229) 2024-11-15T11:02:50,482 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742069_1245 (size=29229) 2024-11-15T11:02:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742069_1245 (size=29229) 2024-11-15T11:02:50,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742070_1246 (size=24096) 2024-11-15T11:02:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742070_1246 (size=24096) 2024-11-15T11:02:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742070_1246 (size=24096) 2024-11-15T11:02:50,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742071_1247 (size=111872) 2024-11-15T11:02:50,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742071_1247 (size=111872) 2024-11-15T11:02:50,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742071_1247 (size=111872) 2024-11-15T11:02:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742072_1248 (size=45609) 2024-11-15T11:02:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742072_1248 (size=45609) 2024-11-15T11:02:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742072_1248 (size=45609) 2024-11-15T11:02:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742073_1249 (size=136454) 2024-11-15T11:02:50,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742073_1249 (size=136454) 2024-11-15T11:02:50,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742073_1249 (size=136454) 2024-11-15T11:02:50,522 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
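[editor's note] The long runs of "addStoredBlock" entries above are the NameNode acknowledging each of the three datanode replicas (127.0.0.1:39395, :42399, :38361) for every jar and split file staged for the second export job, i.e. replication factor 3 on the mini-cluster; the repeated "No job jar file set" warning is the same expected notice as in the first run. A hedged sketch of checking the same replication from client code; the path is a placeholder.

// Hedged sketch: confirming a staged file's replication, which is what the
// three addStoredBlock lines per block above imply. The path is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FileStatus st = fs.getFileStatus(new Path("/user/jenkins/some-staged-file.jar"));
        System.out.println("replication factor: " + st.getReplication());
        for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
            // Each block should list one host per replica.
            System.out.println("block hosts: " + String.join(",", loc.getHosts()));
        }
    }
}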
2024-11-15T11:02:50,524 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-15T11:02:50,526 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:02:50,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742074_1250 (size=338) 2024-11-15T11:02:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742074_1250 (size=338) 2024-11-15T11:02:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742074_1250 (size=338) 2024-11-15T11:02:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742075_1251 (size=15) 2024-11-15T11:02:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742075_1251 (size=15) 2024-11-15T11:02:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742075_1251 (size=15) 2024-11-15T11:02:50,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742076_1252 (size=303775) 2024-11-15T11:02:50,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742076_1252 (size=303775) 2024-11-15T11:02:50,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742076_1252 (size=303775) 2024-11-15T11:02:51,766 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000002/launch_container.sh] 2024-11-15T11:02:51,766 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000002/container_tokens] 2024-11-15T11:02:51,766 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000002/sysfs] 2024-11-15T11:02:52,757 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:02:52,757 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:02:52,759 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0004_000001 (auth:SIMPLE) from 127.0.0.1:51212 2024-11-15T11:02:52,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000001/launch_container.sh] 2024-11-15T11:02:52,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000001/container_tokens] 2024-11-15T11:02:52,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0004/container_1731668480794_0004_01_000001/sysfs] 2024-11-15T11:02:53,652 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0005_000001 (auth:SIMPLE) from 127.0.0.1:53010 2024-11-15T11:02:59,013 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0005_000001 (auth:SIMPLE) from 127.0.0.1:54342 2024-11-15T11:02:59,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742077_1253 (size=349425) 2024-11-15T11:02:59,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742077_1253 (size=349425) 2024-11-15T11:02:59,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742077_1253 (size=349425) 2024-11-15T11:03:01,272 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0005_000001 (auth:SIMPLE) from 127.0.0.1:37236 2024-11-15T11:03:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742078_1254 (size=16925) 2024-11-15T11:03:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742078_1254 (size=16925) 2024-11-15T11:03:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742078_1254 (size=16925) 2024-11-15T11:03:04,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to 
blk_1073742079_1255 (size=462) 2024-11-15T11:03:04,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742079_1255 (size=462) 2024-11-15T11:03:04,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742079_1255 (size=462) 2024-11-15T11:03:04,992 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000002/launch_container.sh] 2024-11-15T11:03:04,993 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000002/container_tokens] 2024-11-15T11:03:04,993 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000002/sysfs] 2024-11-15T11:03:05,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742080_1256 (size=16925) 2024-11-15T11:03:05,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742080_1256 (size=16925) 2024-11-15T11:03:05,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742080_1256 (size=16925) 2024-11-15T11:03:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742081_1257 (size=349425) 2024-11-15T11:03:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742081_1257 (size=349425) 2024-11-15T11:03:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742081_1257 (size=349425) 2024-11-15T11:03:05,044 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0005_000001 (auth:SIMPLE) from 127.0.0.1:37246 2024-11-15T11:03:06,780 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:03:06,781 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
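The entries above show the test finalizing and verifying a snapshot export driven by the ExportSnapshot MapReduce tool. As a hedged, illustrative sketch only (not the test's own code), the same tool can be invoked programmatically via ToolRunner; the destination URI below is a placeholder, and on a real cluster the tool is usually launched as `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot --snapshot <name> --copy-to <uri>`.

```java
// Illustrative sketch, not taken from this log: exporting a snapshot with ExportSnapshot.
// The --copy-to destination is a placeholder, not the path used by the test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the HFiles it references to another filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export",   // placeholder destination
        "--overwrite"
    });
    System.exit(rc);
  }
}
```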
2024-11-15T11:03:06,784 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-15T11:03:06,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:03:06,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:03:06,785 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T11:03:06,786 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T11:03:06,786 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T11:03:06,786 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@3cf44f9e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T11:03:06,786 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T11:03:06,787 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668550965/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T11:03:06,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-15T11:03:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-15T11:03:06,810 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668586810"}]},"ts":"1731668586810"} 2024-11-15T11:03:06,812 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-15T11:03:06,812 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-15T11:03:06,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-15T11:03:06,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, UNASSIGN}] 2024-11-15T11:03:06,816 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, UNASSIGN 2024-11-15T11:03:06,816 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, UNASSIGN 2024-11-15T11:03:06,819 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=793eac0e5bfab61ca7fe5e8329a9314a, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:06,819 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=ce802a4ff94d9f00553d85d08c2eb68f, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:06,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, UNASSIGN because future has completed 2024-11-15T11:03:06,823 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:06,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:06,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, UNASSIGN because future has completed 2024-11-15T11:03:06,824 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:06,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:06,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-15T11:03:06,977 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:03:06,977 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing ce802a4ff94d9f00553d85d08c2eb68f, disabling compactions & flushes 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 793eac0e5bfab61ca7fe5e8329a9314a, disabling compactions & flushes 2024-11-15T11:03:06,977 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:03:06,977 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. after waiting 0 ms 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. after waiting 0 ms 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:03:06,977 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 
2024-11-15T11:03:06,985 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:06,985 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:06,986 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:06,986 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:06,986 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a. 2024-11-15T11:03:06,986 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f. 2024-11-15T11:03:06,986 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 793eac0e5bfab61ca7fe5e8329a9314a: Waiting for close lock at 1731668586977Running coprocessor pre-close hooks at 1731668586977Disabling compacts and flushes for region at 1731668586977Disabling writes for close at 1731668586977Writing region close event to WAL at 1731668586979 (+2 ms)Running coprocessor post-close hooks at 1731668586986 (+7 ms)Closed at 1731668586986 2024-11-15T11:03:06,986 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for ce802a4ff94d9f00553d85d08c2eb68f: Waiting for close lock at 1731668586977Running coprocessor pre-close hooks at 1731668586977Disabling compacts and flushes for region at 1731668586977Disabling writes for close at 1731668586977Writing region close event to WAL at 1731668586979 (+2 ms)Running coprocessor post-close hooks at 1731668586986 (+7 ms)Closed at 1731668586986 2024-11-15T11:03:06,988 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:03:06,989 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=793eac0e5bfab61ca7fe5e8329a9314a, regionState=CLOSED 2024-11-15T11:03:06,989 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:03:06,991 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=ce802a4ff94d9f00553d85d08c2eb68f, regionState=CLOSED 2024-11-15T11:03:06,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:06,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:06,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-15T11:03:06,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 793eac0e5bfab61ca7fe5e8329a9314a, server=4cdab6d79b20,39207,1731668474152 in 170 msec 2024-11-15T11:03:06,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=89 2024-11-15T11:03:06,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure ce802a4ff94d9f00553d85d08c2eb68f, server=4cdab6d79b20,45865,1731668473964 in 170 msec 2024-11-15T11:03:06,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=793eac0e5bfab61ca7fe5e8329a9314a, UNASSIGN in 181 msec 2024-11-15T11:03:06,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=89, resume processing ppid=88 2024-11-15T11:03:06,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce802a4ff94d9f00553d85d08c2eb68f, UNASSIGN in 182 msec 2024-11-15T11:03:07,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-15T11:03:07,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 187 msec 2024-11-15T11:03:07,005 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668587005"}]},"ts":"1731668587005"} 2024-11-15T11:03:07,008 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-15T11:03:07,008 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-15T11:03:07,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 202 msec 2024-11-15T11:03:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-15T11:03:07,126 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T11:03:07,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete 
testtb-testConsecutiveExports 2024-11-15T11:03:07,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,129 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-15T11:03:07,130 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-15T11:03:07,135 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:03:07,135 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:03:07,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,138 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/recovered.edits] 2024-11-15T11:03:07,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T11:03:07,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-11-15T11:03:07,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T11:03:07,139 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/recovered.edits] 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-11-15T11:03:07,140 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:03:07,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-15T11:03:07,146 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/cf/b6792af580854f6382f2b1b1104a1b0a 
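The HFileArchiver lines around this point show that dropping the table does not delete its store files outright; they are moved under the cluster's `archive/data/...` tree. A minimal, hedged sketch of inspecting that tree with the plain Hadoop FileSystem API follows; the NameNode URI and path are taken from the log lines above and would differ on another run.

```java
// Hedged sketch: listing the store files HFileArchiver moved into the archive directory
// after the table drop recorded above. URI and path come from this particular test run.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37487"), conf);
    // Archived files for a dropped table live under <hbase.rootdir>/archive/data/<ns>/<table>/
    Path archived = new Path("/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/"
        + "archive/data/default/testtb-testConsecutiveExports");
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}
```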
2024-11-15T11:03:07,147 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/cf/83eb3ebf9e8f4173bec9cf92b75426c6 2024-11-15T11:03:07,151 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a/recovered.edits/9.seqid 2024-11-15T11:03:07,151 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f/recovered.edits/9.seqid 2024-11-15T11:03:07,151 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/793eac0e5bfab61ca7fe5e8329a9314a 2024-11-15T11:03:07,152 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testConsecutiveExports/ce802a4ff94d9f00553d85d08c2eb68f 2024-11-15T11:03:07,152 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-15T11:03:07,154 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,158 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-15T11:03:07,162 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-15T11:03:07,164 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,164 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
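The DISABLE (pid=87) and DELETE (pid=93) procedures in this stretch, and the snapshot deletions recorded a little further below, are driven by ordinary client Admin calls. A hedged sketch of the equivalent client-side sequence, not the test's own code, assuming connection settings come from an hbase-site.xml on the classpath:

```java
// Hedged sketch: the Admin calls behind the DisableTableProcedure / DeleteTableProcedure
// and snapshot deletions seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportedTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);   // master runs DisableTableProcedure, unassigning both regions
        admin.deleteTable(table);    // master runs DeleteTableProcedure; store files go to the archive dir
      }
      // Snapshots are removed separately, as in the deletions logged further below.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
```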
2024-11-15T11:03:07,164 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668587164"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:07,164 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668587164"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:07,167 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:03:07,167 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ce802a4ff94d9f00553d85d08c2eb68f, NAME => 'testtb-testConsecutiveExports,,1731668549275.ce802a4ff94d9f00553d85d08c2eb68f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 793eac0e5bfab61ca7fe5e8329a9314a, NAME => 'testtb-testConsecutiveExports,1,1731668549275.793eac0e5bfab61ca7fe5e8329a9314a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:03:07,167 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-15T11:03:07,167 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668587167"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:07,169 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-15T11:03:07,170 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T11:03:07,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 43 msec 2024-11-15T11:03:07,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-15T11:03:07,246 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-15T11:03:07,246 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T11:03:07,255 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-15T11:03:07,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-15T11:03:07,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-15T11:03:07,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-15T11:03:07,305 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=789 (was 786) 
Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 29487) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1806085742_1 at /127.0.0.1:37612 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:37644 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44457 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:40834 
[Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3821 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:44457 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:54550 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1806085742_1 at /127.0.0.1:40810 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 795), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=518 (was 569), ProcessCount=22 (was 19) - ProcessCount LEAK? 
-, AvailableMemoryMB=666 (was 1215) 2024-11-15T11:03:07,306 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-11-15T11:03:07,332 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=789, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=518, ProcessCount=22, AvailableMemoryMB=663 2024-11-15T11:03:07,332 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-11-15T11:03:07,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:07,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:07,337 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:07,337 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:07,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-15T11:03:07,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T11:03:07,339 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:07,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742082_1258 (size=422) 2024-11-15T11:03:07,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742082_1258 (size=422) 2024-11-15T11:03:07,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742082_1258 (size=422) 2024-11-15T11:03:07,360 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5cc39749fcf86bff70bcc4547b5b621d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:07,369 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f9a2c32652deb65138d9f04999d3eee3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:07,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742083_1259 (size=83) 2024-11-15T11:03:07,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742083_1259 (size=83) 2024-11-15T11:03:07,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742083_1259 (size=83) 2024-11-15T11:03:07,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742084_1260 (size=83) 2024-11-15T11:03:07,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742084_1260 (size=83) 2024-11-15T11:03:07,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742084_1260 (size=83) 2024-11-15T11:03:07,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing f9a2c32652deb65138d9f04999d3eee3, disabling compactions & flushes 2024-11-15T11:03:07,400 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 
2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. after waiting 0 ms 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:07,400 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for f9a2c32652deb65138d9f04999d3eee3: Waiting for close lock at 1731668587399Disabling compacts and flushes for region at 1731668587399Disabling writes for close at 1731668587400 (+1 ms)Writing region close event to WAL at 1731668587400Closed at 1731668587400 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 5cc39749fcf86bff70bcc4547b5b621d, disabling compactions & flushes 2024-11-15T11:03:07,400 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. after waiting 0 ms 2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,400 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 
2024-11-15T11:03:07,400 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5cc39749fcf86bff70bcc4547b5b621d: Waiting for close lock at 1731668587400Disabling compacts and flushes for region at 1731668587400Disabling writes for close at 1731668587400Writing region close event to WAL at 1731668587400Closed at 1731668587400 2024-11-15T11:03:07,402 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:07,403 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731668587402"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668587402"}]},"ts":"1731668587402"} 2024-11-15T11:03:07,403 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731668587402"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668587402"}]},"ts":"1731668587402"} 2024-11-15T11:03:07,407 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:07,408 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:07,408 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668587408"}]},"ts":"1731668587408"} 2024-11-15T11:03:07,411 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-15T11:03:07,411 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:07,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:07,414 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:07,414 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:07,414 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:07,414 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:07,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, ASSIGN}] 2024-11-15T11:03:07,415 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, ASSIGN 2024-11-15T11:03:07,415 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, ASSIGN 2024-11-15T11:03:07,416 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:03:07,416 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:03:07,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T11:03:07,567 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
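Once the TransitRegionStateProcedures above choose a target server for each region, the resulting assignments are readable from hbase:meta on the client side. A sketch using the RegionLocator API, assuming a 2.x+ client; the wrapper class and method names are illustrative:

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class PrintAssignments {
  // Prints each region's name and the server it is assigned to, i.e. the
  // OPENING/OPEN locations that the procedures above record in hbase:meta.
  static void printAssignments(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}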
2024-11-15T11:03:07,567 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=f9a2c32652deb65138d9f04999d3eee3, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:07,568 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=5cc39749fcf86bff70bcc4547b5b621d, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:03:07,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, ASSIGN because future has completed 2024-11-15T11:03:07,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:07,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, ASSIGN because future has completed 2024-11-15T11:03:07,572 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:03:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T11:03:07,727 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:07,728 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => f9a2c32652deb65138d9f04999d3eee3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:03:07,728 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. service=AccessControlService 2024-11-15T11:03:07,728 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:07,729 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 5cc39749fcf86bff70bcc4547b5b621d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,729 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. service=AccessControlService 2024-11-15T11:03:07,729 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:03:07,730 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,730 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:07,730 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,730 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,731 INFO [StoreOpener-f9a2c32652deb65138d9f04999d3eee3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,731 INFO [StoreOpener-5cc39749fcf86bff70bcc4547b5b621d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,733 INFO [StoreOpener-5cc39749fcf86bff70bcc4547b5b621d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5cc39749fcf86bff70bcc4547b5b621d columnFamilyName cf 2024-11-15T11:03:07,733 INFO [StoreOpener-f9a2c32652deb65138d9f04999d3eee3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9a2c32652deb65138d9f04999d3eee3 columnFamilyName cf 2024-11-15T11:03:07,733 DEBUG [StoreOpener-f9a2c32652deb65138d9f04999d3eee3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:07,733 DEBUG [StoreOpener-5cc39749fcf86bff70bcc4547b5b621d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:07,734 INFO [StoreOpener-5cc39749fcf86bff70bcc4547b5b621d-1 {}] regionserver.HStore(327): Store=5cc39749fcf86bff70bcc4547b5b621d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:07,734 INFO [StoreOpener-f9a2c32652deb65138d9f04999d3eee3-1 {}] regionserver.HStore(327): Store=f9a2c32652deb65138d9f04999d3eee3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:07,734 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,734 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,735 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,735 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,736 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for f9a2c32652deb65138d9f04999d3eee3 
2024-11-15T11:03:07,738 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,738 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,740 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:07,741 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:07,741 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened f9a2c32652deb65138d9f04999d3eee3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64422323, jitterRate=-0.040032580494880676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:07,741 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:07,741 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 5cc39749fcf86bff70bcc4547b5b621d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65304894, jitterRate=-0.026881247758865356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:07,741 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:07,742 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 5cc39749fcf86bff70bcc4547b5b621d: Running coprocessor pre-open hook at 1731668587730Writing region info on filesystem at 1731668587730Initializing all the Stores at 1731668587730Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668587731 (+1 ms)Cleaning up temporary data from old regions at 1731668587736 (+5 ms)Running coprocessor post-open hooks at 1731668587741 (+5 ms)Region opened successfully at 1731668587742 (+1 ms) 2024-11-15T11:03:07,742 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for f9a2c32652deb65138d9f04999d3eee3: Running 
coprocessor pre-open hook at 1731668587729Writing region info on filesystem at 1731668587729Initializing all the Stores at 1731668587730 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668587730Cleaning up temporary data from old regions at 1731668587736 (+6 ms)Running coprocessor post-open hooks at 1731668587741 (+5 ms)Region opened successfully at 1731668587742 (+1 ms) 2024-11-15T11:03:07,744 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d., pid=98, masterSystemTime=1731668587724 2024-11-15T11:03:07,744 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3., pid=97, masterSystemTime=1731668587723 2024-11-15T11:03:07,746 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,746 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:07,747 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=5cc39749fcf86bff70bcc4547b5b621d, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:03:07,747 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:07,747 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 
2024-11-15T11:03:07,749 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=f9a2c32652deb65138d9f04999d3eee3, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:07,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:03:07,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:07,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=95 2024-11-15T11:03:07,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097 in 180 msec 2024-11-15T11:03:07,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-15T11:03:07,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964 in 183 msec 2024-11-15T11:03:07,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, ASSIGN in 341 msec 2024-11-15T11:03:07,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-15T11:03:07,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, ASSIGN in 342 msec 2024-11-15T11:03:07,759 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:07,759 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668587759"}]},"ts":"1731668587759"} 2024-11-15T11:03:07,761 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-15T11:03:07,763 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:07,763 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-15T11:03:07,772 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T11:03:07,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:07,776 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:07,777 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:07,777 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:07,777 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:07,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 443 msec 2024-11-15T11:03:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T11:03:07,966 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T11:03:07,966 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-11-15T11:03:07,966 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:07,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. 
Checking AM states. 2024-11-15T11:03:07,972 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:07,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-15T11:03:07,972 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T11:03:07,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T11:03:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668587981 (current time:1731668587981). 2024-11-15T11:03:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-15T11:03:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58be46ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:07,983 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:07,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:07,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:07,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@470583e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:07,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 
2024-11-15T11:03:07,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:07,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:07,987 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:07,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45aef136, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:07,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:07,993 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:07,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
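The "jenkins: RWXCA" row written to hbase:acl above is created automatically by the AccessController post-create hook for the table owner, and is re-read during snapshot validation below. An equivalent explicit grant from a client would look roughly like the following sketch, assuming the AccessControlClient utility; the wrapper class and method are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantOwnerPerms {
  // Grants the full R/W/X/C/A set on the test table to user "jenkins",
  // matching the "jenkins: RWXCA" entry stored in hbase:acl above.
  static void grantAll(Connection conn) throws Throwable {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    AccessControlClient.grant(conn, tn, "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}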
2024-11-15T11:03:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:07,996 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:08,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d5ac59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:08,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:08,005 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:08,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:08,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:08,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9d61f46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:08,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:08,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,008 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:08,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fdb5804, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:08,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:08,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:08,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:08,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:08,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
2024-11-15T11:03:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T11:03:08,019 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:08,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
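The SnapshotProcedure that the master registers as pid=99 below is driven by one synchronous client call. A minimal sketch, assuming the standard Admin snapshot API; the wrapper class and method are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshot {
  // Requests the FLUSH-type snapshot that the master runs as SnapshotProcedure
  // pid=99 below; the call blocks until the procedure completes.
  static void takeSnapshot(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}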
2024-11-15T11:03:08,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T11:03:08,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-15T11:03:08,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T11:03:08,023 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:08,024 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:08,027 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:08,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742085_1261 (size=215) 2024-11-15T11:03:08,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742085_1261 (size=215) 2024-11-15T11:03:08,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742085_1261 (size=215) 2024-11-15T11:03:08,054 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:08,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3}] 2024-11-15T11:03:08,056 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,056 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T11:03:08,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-15T11:03:08,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-15T11:03:08,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:08,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for f9a2c32652deb65138d9f04999d3eee3: 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 5cc39749fcf86bff70bcc4547b5b621d: 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:08,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742086_1262 (size=86) 2024-11-15T11:03:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742087_1263 (size=86) 2024-11-15T11:03:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742087_1263 (size=86) 2024-11-15T11:03:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742087_1263 (size=86) 2024-11-15T11:03:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742086_1262 (size=86) 2024-11-15T11:03:08,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742086_1262 (size=86) 2024-11-15T11:03:08,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-15T11:03:08,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 
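Once the consolidation, verification, and completion states below finish and the manifest is moved out of .hbase-snapshot/.tmp, the snapshot can be confirmed from the client side. A sketch using Admin.listSnapshots, assuming the 2.x+ client API; names are illustrative:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListTestSnapshots {
  // Lists completed snapshots matching the test prefix, confirming the
  // manifest has been promoted from .hbase-snapshot/.tmp to .hbase-snapshot.
  static void verify(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      List<SnapshotDescription> snapshots =
          admin.listSnapshots(Pattern.compile("emptySnaptb0-.*"));
      for (SnapshotDescription s : snapshots) {
        System.out.println(s.getName() + " table=" + s.getTableName());
      }
    }
  }
}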
2024-11-15T11:03:08,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-15T11:03:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-15T11:03:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-15T11:03:08,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:08,222 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:08,222 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 in 169 msec 2024-11-15T11:03:08,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=99 2024-11-15T11:03:08,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d in 169 msec 2024-11-15T11:03:08,226 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:08,227 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:08,228 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:08,228 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,229 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742088_1264 (size=597) 2024-11-15T11:03:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742088_1264 (size=597) 2024-11-15T11:03:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742088_1264 (size=597) 2024-11-15T11:03:08,244 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:08,259 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:08,260 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,261 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:08,261 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-15T11:03:08,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 241 msec 2024-11-15T11:03:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T11:03:08,336 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T11:03:08,341 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='055b20ec1f6b62c70857c0378a80d6a13', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:03:08,342 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='1a0752ee6d37a855d185388616e7ac1fb', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:08,343 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='246b4992c6e0eabed5d600470cb9b7811', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:08,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:08,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:08,353 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T11:03:08,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 
2024-11-15T11:03:08,356 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:08,357 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T11:03:08,362 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T11:03:08,367 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T11:03:08,369 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T11:03:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668588369 (current time:1731668588369). 2024-11-15T11:03:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-15T11:03:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23f9100b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:08,370 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6c3905, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:08,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,372 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:08,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774fd836, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:08,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:08,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:08,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:08,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,376 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@130f02e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:08,378 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:08,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:08,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:08,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cf76c8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:08,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:08,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,380 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b33961, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:08,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:08,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:08,383 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:08,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:08,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:08,387 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T11:03:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-15T11:03:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T11:03:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-15T11:03:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T11:03:08,389 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:08,390 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:08,393 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:08,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742089_1265 (size=210) 2024-11-15T11:03:08,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742089_1265 (size=210) 2024-11-15T11:03:08,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742089_1265 (size=210) 2024-11-15T11:03:08,400 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:08,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3}] 2024-11-15T11:03:08,401 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:08,401 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T11:03:08,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-15T11:03:08,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-15T11:03:08,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:08,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:08,553 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing f9a2c32652deb65138d9f04999d3eee3 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-15T11:03:08,553 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 5cc39749fcf86bff70bcc4547b5b621d 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-15T11:03:08,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/.tmp/cf/2eadf01a4d074d988349d4805888fb49 is 71, key is 146ad081ad164649aed49440afe6bdda/cf:q/1731668588351/Put/seqid=0 2024-11-15T11:03:08,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742090_1266 (size=8392) 2024-11-15T11:03:08,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742090_1266 (size=8392) 2024-11-15T11:03:08,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742090_1266 (size=8392) 2024-11-15T11:03:08,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/.tmp/cf/b00b7a7f8c024d2c8406f445a733dac5 is 71, key is 02ff19721c35dc0263dde6f21da36d0c/cf:q/1731668588349/Put/seqid=0 2024-11-15T11:03:08,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/.tmp/cf/2eadf01a4d074d988349d4805888fb49 2024-11-15T11:03:08,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/.tmp/cf/2eadf01a4d074d988349d4805888fb49 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49 2024-11-15T11:03:08,591 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49, entries=48, sequenceid=6, filesize=8.2 K 2024-11-15T11:03:08,592 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f9a2c32652deb65138d9f04999d3eee3 in 39ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:08,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for f9a2c32652deb65138d9f04999d3eee3: 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49] hfiles 2024-11-15T11:03:08,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742091_1267 (size=5216) 2024-11-15T11:03:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742091_1267 (size=5216) 2024-11-15T11:03:08,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742091_1267 (size=5216) 2024-11-15T11:03:08,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/.tmp/cf/b00b7a7f8c024d2c8406f445a733dac5 2024-11-15T11:03:08,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/.tmp/cf/b00b7a7f8c024d2c8406f445a733dac5 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5 2024-11-15T11:03:08,611 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5, entries=2, sequenceid=6, filesize=5.1 K 2024-11-15T11:03:08,612 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 5cc39749fcf86bff70bcc4547b5b621d in 59ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:08,612 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 5cc39749fcf86bff70bcc4547b5b621d: 2024-11-15T11:03:08,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T11:03:08,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:08,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5] hfiles 2024-11-15T11:03:08,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742092_1268 (size=125) 2024-11-15T11:03:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742092_1268 (size=125) 2024-11-15T11:03:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742092_1268 (size=125) 2024-11-15T11:03:08,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 
2024-11-15T11:03:08,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-15T11:03:08,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-15T11:03:08,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,633 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:08,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742093_1269 (size=125) 2024-11-15T11:03:08,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742093_1269 (size=125) 2024-11-15T11:03:08,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742093_1269 (size=125) 2024-11-15T11:03:08,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f9a2c32652deb65138d9f04999d3eee3 in 234 msec 2024-11-15T11:03:08,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T11:03:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T11:03:09,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 
2024-11-15T11:03:09,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-15T11:03:09,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-15T11:03:09,042 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:09,042 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:09,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-15T11:03:09,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d in 643 msec 2024-11-15T11:03:09,045 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:09,045 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:09,046 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:09,046 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:09,047 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:09,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742094_1270 (size=675) 2024-11-15T11:03:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742094_1270 (size=675) 2024-11-15T11:03:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742094_1270 (size=675) 2024-11-15T11:03:09,061 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:09,066 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:09,067 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:09,068 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:09,068 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-15T11:03:09,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 680 msec 2024-11-15T11:03:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T11:03:09,525 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T11:03:09,548 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:03:09,550 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:03:09,550 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T11:03:09,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:03:09,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:03:09,552 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T11:03:09,552 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T11:03:09,552 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T11:03:09,552 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T11:03:09,554 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:09,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:09,556 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:09,556 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:09,557 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-15T11:03:09,557 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:09,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T11:03:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742095_1271 (size=399) 2024-11-15T11:03:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742095_1271 (size=399) 2024-11-15T11:03:09,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742095_1271 (size=399) 2024-11-15T11:03:09,566 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2cd824b6831acfe6688b4305519b00a1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:09,566 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 441a0925a8a77248db843c908bcbfd6b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:09,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742096_1272 (size=85) 2024-11-15T11:03:09,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742097_1273 (size=85) 2024-11-15T11:03:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742096_1272 (size=85) 2024-11-15T11:03:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742096_1272 (size=85) 2024-11-15T11:03:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742097_1273 (size=85) 2024-11-15T11:03:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742097_1273 (size=85) 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 2cd824b6831acfe6688b4305519b00a1, disabling compactions & flushes 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 441a0925a8a77248db843c908bcbfd6b, disabling compactions & flushes 2024-11-15T11:03:09,574 
INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,574 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. after waiting 0 ms 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,574 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2cd824b6831acfe6688b4305519b00a1: Waiting for close lock at 1731668589574Disabling compacts and flushes for region at 1731668589574Disabling writes for close at 1731668589574Writing region close event to WAL at 1731668589574Closed at 1731668589574 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. after waiting 0 ms 2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:09,574 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 
2024-11-15T11:03:09,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 441a0925a8a77248db843c908bcbfd6b: Waiting for close lock at 1731668589574Disabling compacts and flushes for region at 1731668589574Disabling writes for close at 1731668589574Writing region close event to WAL at 1731668589574Closed at 1731668589574 2024-11-15T11:03:09,575 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:09,576 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731668589575"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668589575"}]},"ts":"1731668589575"} 2024-11-15T11:03:09,576 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731668589575"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668589575"}]},"ts":"1731668589575"} 2024-11-15T11:03:09,578 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:09,579 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:09,579 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668589579"}]},"ts":"1731668589579"} 2024-11-15T11:03:09,581 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-15T11:03:09,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:09,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:09,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:09,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:09,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:09,583 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, ASSIGN}] 2024-11-15T11:03:09,584 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, ASSIGN 2024-11-15T11:03:09,584 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, ASSIGN 2024-11-15T11:03:09,584 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:03:09,584 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T11:03:09,735 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:03:09,735 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=441a0925a8a77248db843c908bcbfd6b, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:09,735 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=2cd824b6831acfe6688b4305519b00a1, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:09,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, ASSIGN because future has completed 2024-11-15T11:03:09,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:09,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, ASSIGN because future has completed 2024-11-15T11:03:09,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:09,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T11:03:09,894 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:09,894 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => 441a0925a8a77248db843c908bcbfd6b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.', STARTKEY => '2', ENDKEY => ''} 2024-11-15T11:03:09,894 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. service=AccessControlService 2024-11-15T11:03:09,894 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 
2024-11-15T11:03:09,894 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 2cd824b6831acfe6688b4305519b00a1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.', STARTKEY => '', ENDKEY => '2'} 2024-11-15T11:03:09,895 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. service=AccessControlService 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:09,895 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,895 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,896 INFO [StoreOpener-441a0925a8a77248db843c908bcbfd6b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,897 INFO [StoreOpener-2cd824b6831acfe6688b4305519b00a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,898 INFO [StoreOpener-2cd824b6831acfe6688b4305519b00a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cd824b6831acfe6688b4305519b00a1 columnFamilyName cf 2024-11-15T11:03:09,898 DEBUG [StoreOpener-2cd824b6831acfe6688b4305519b00a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:09,898 INFO [StoreOpener-441a0925a8a77248db843c908bcbfd6b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle 
point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 441a0925a8a77248db843c908bcbfd6b columnFamilyName cf 2024-11-15T11:03:09,898 DEBUG [StoreOpener-441a0925a8a77248db843c908bcbfd6b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:09,898 INFO [StoreOpener-2cd824b6831acfe6688b4305519b00a1-1 {}] regionserver.HStore(327): Store=2cd824b6831acfe6688b4305519b00a1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:09,899 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,899 INFO [StoreOpener-441a0925a8a77248db843c908bcbfd6b-1 {}] regionserver.HStore(327): Store=441a0925a8a77248db843c908bcbfd6b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:09,899 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,899 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,899 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 2cd824b6831acfe6688b4305519b00a1 
2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,900 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,901 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,903 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,904 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:09,904 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 2cd824b6831acfe6688b4305519b00a1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75318415, jitterRate=0.12233184278011322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:09,904 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:09,905 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:09,905 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 2cd824b6831acfe6688b4305519b00a1: Running coprocessor pre-open hook at 1731668589895Writing region info on filesystem at 1731668589895Initializing all the Stores at 1731668589896 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668589896Cleaning up temporary data from old regions at 1731668589900 (+4 ms)Running coprocessor post-open hooks at 1731668589904 (+4 ms)Region opened successfully at 1731668589905 (+1 ms) 2024-11-15T11:03:09,905 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened 441a0925a8a77248db843c908bcbfd6b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71272538, jitterRate=0.062043577432632446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
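
The desiredMaxFileSize values in the two "Opened ..." records above are consistent with a 64 MB base split size plus a per-region random jitter; the 64 MB base and the flush-size doubling below are inferred from the numbers, not stated in the log:

    67,108,864 * (1 + 0.12233184278011322) ~= 75,318,415   (region 2cd824b6831acfe6688b4305519b00a1)
    67,108,864 * (1 + 0.062043577432632446) ~= 71,272,538  (region 441a0925a8a77248db843c908bcbfd6b)
    initialSize = 268,435,456 = 2 * 134,217,728, i.e. twice a 128 MB memstore flush size
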
2024-11-15T11:03:09,906 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:09,906 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for 441a0925a8a77248db843c908bcbfd6b: Running coprocessor pre-open hook at 1731668589895Writing region info on filesystem at 1731668589895Initializing all the Stores at 1731668589896 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668589896Cleaning up temporary data from old regions at 1731668589900 (+4 ms)Running coprocessor post-open hooks at 1731668589906 (+6 ms)Region opened successfully at 1731668589906 2024-11-15T11:03:09,906 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b., pid=108, masterSystemTime=1731668589890 2024-11-15T11:03:09,906 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1., pid=109, masterSystemTime=1731668589891 2024-11-15T11:03:09,908 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,908 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:09,909 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=2cd824b6831acfe6688b4305519b00a1, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:09,909 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:09,910 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 
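
Further down, the records from 11:03:10,185 onward show the client side of this test: the CREATE operation completing, region lookups for rows '1' and '2', one small cell written into each region (the 24 B flushes at 11:03:10,385), and a forced merge of the two regions (pid=110). A hedged sketch of that client sequence follows; the cell values and class name are illustrative, while the table name, row keys, 'cf' family, empty qualifier, and force flag are taken from the log:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndMergeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             RegionLocator locator = conn.getRegionLocator(tn);
             Admin admin = conn.getAdmin()) {
          // one cell per region; the flush records show keys '1/cf:' and '2/cf:' (empty qualifier)
          table.put(new Put(Bytes.toBytes("1")).addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("v1")));
          table.put(new Put(Bytes.toBytes("2")).addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("v2")));

          // merge the two regions (2cd824... and 441a09... above), force=true as in pid=110
          List<HRegionLocation> locs = locator.getAllRegionLocations();
          byte[][] regions = new byte[][] {
              locs.get(0).getRegion().getEncodedNameAsBytes(),
              locs.get(1).getRegion().getEncodedNameAsBytes() };
          admin.mergeRegionsAsync(regions, true).get();  // waits for the MergeTableRegionsProcedure
        }
      }
    }
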
2024-11-15T11:03:09,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=441a0925a8a77248db843c908bcbfd6b, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:09,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:09,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:09,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=106 2024-11-15T11:03:09,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152 in 173 msec 2024-11-15T11:03:09,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-15T11:03:09,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964 in 176 msec 2024-11-15T11:03:09,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, ASSIGN in 331 msec 2024-11-15T11:03:09,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=107, resume processing ppid=105 2024-11-15T11:03:09,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, ASSIGN in 332 msec 2024-11-15T11:03:09,918 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:09,918 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668589918"}]},"ts":"1731668589918"} 2024-11-15T11:03:09,920 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-15T11:03:09,921 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:09,921 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-15T11:03:09,924 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-15T11:03:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,929 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:09,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 374 msec 2024-11-15T11:03:10,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T11:03:10,185 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T11:03:10,188 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:10,192 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:10,195 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-15T11:03:10,208 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b] 2024-11-15T11:03:10,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b], force=true 2024-11-15T11:03:10,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b], force=true 2024-11-15T11:03:10,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b], force=true 2024-11-15T11:03:10,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b], 
force=true 2024-11-15T11:03:10,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T11:03:10,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, UNASSIGN}] 2024-11-15T11:03:10,228 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, UNASSIGN 2024-11-15T11:03:10,229 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, UNASSIGN 2024-11-15T11:03:10,229 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=2cd824b6831acfe6688b4305519b00a1, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:10,230 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=441a0925a8a77248db843c908bcbfd6b, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:10,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2cd824b6831acfe6688b4305519b00a1, UNASSIGN because future has completed 2024-11-15T11:03:10,232 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:10,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:10,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, UNASSIGN because future has completed 2024-11-15T11:03:10,233 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:10,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:10,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=110 2024-11-15T11:03:10,385 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing 2cd824b6831acfe6688b4305519b00a1, disabling compactions & flushes 2024-11-15T11:03:10,385 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. after waiting 0 ms 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:10,385 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing 2cd824b6831acfe6688b4305519b00a1 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-15T11:03:10,385 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:10,385 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T11:03:10,386 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing 441a0925a8a77248db843c908bcbfd6b, disabling compactions & flushes 2024-11-15T11:03:10,386 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:10,386 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:10,386 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 
after waiting 0 ms 2024-11-15T11:03:10,386 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:10,386 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing 441a0925a8a77248db843c908bcbfd6b 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-15T11:03:10,406 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/.tmp/cf/b3860e202c3b4833be61af85c77a6e54 is 28, key is 1/cf:/1731668590189/Put/seqid=0 2024-11-15T11:03:10,406 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/.tmp/cf/0a7ebcf6451347bb8241286c67366120 is 28, key is 2/cf:/1731668590194/Put/seqid=0 2024-11-15T11:03:10,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742098_1274 (size=4945) 2024-11-15T11:03:10,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742098_1274 (size=4945) 2024-11-15T11:03:10,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742098_1274 (size=4945) 2024-11-15T11:03:10,412 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/.tmp/cf/0a7ebcf6451347bb8241286c67366120 2024-11-15T11:03:10,419 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/.tmp/cf/0a7ebcf6451347bb8241286c67366120 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120 2024-11-15T11:03:10,425 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120, entries=1, sequenceid=5, filesize=4.8 K 2024-11-15T11:03:10,426 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): 
Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 441a0925a8a77248db843c908bcbfd6b in 40ms, sequenceid=5, compaction requested=false 2024-11-15T11:03:10,426 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-15T11:03:10,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742099_1275 (size=4945) 2024-11-15T11:03:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742099_1275 (size=4945) 2024-11-15T11:03:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742099_1275 (size=4945) 2024-11-15T11:03:10,433 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/.tmp/cf/b3860e202c3b4833be61af85c77a6e54 2024-11-15T11:03:10,439 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/.tmp/cf/b3860e202c3b4833be61af85c77a6e54 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54 2024-11-15T11:03:10,446 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54, entries=1, sequenceid=5, filesize=4.8 K 2024-11-15T11:03:10,448 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2cd824b6831acfe6688b4305519b00a1 in 63ms, sequenceid=5, compaction requested=false 2024-11-15T11:03:10,452 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:03:10,453 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:03:10,453 DEBUG 
[RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:10,453 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. 2024-11-15T11:03:10,453 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for 441a0925a8a77248db843c908bcbfd6b: Waiting for close lock at 1731668590386Running coprocessor pre-close hooks at 1731668590386Disabling compacts and flushes for region at 1731668590386Disabling writes for close at 1731668590386Obtaining lock to block concurrent updates at 1731668590386Preparing flush snapshotting stores in 441a0925a8a77248db843c908bcbfd6b at 1731668590386Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731668590386Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b. at 1731668590387 (+1 ms)Flushing 441a0925a8a77248db843c908bcbfd6b/cf: creating writer at 1731668590387Flushing 441a0925a8a77248db843c908bcbfd6b/cf: appending metadata at 1731668590405 (+18 ms)Flushing 441a0925a8a77248db843c908bcbfd6b/cf: closing flushed file at 1731668590405Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fe5d110: reopening flushed file at 1731668590419 (+14 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 441a0925a8a77248db843c908bcbfd6b in 40ms, sequenceid=5, compaction requested=false at 1731668590426 (+7 ms)Writing region close event to WAL at 1731668590446 (+20 ms)Running coprocessor post-close hooks at 1731668590453 (+7 ms)Closed at 1731668590453 2024-11-15T11:03:10,454 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:10,454 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 2024-11-15T11:03:10,454 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for 2cd824b6831acfe6688b4305519b00a1: Waiting for close lock at 1731668590385Running coprocessor pre-close hooks at 1731668590385Disabling compacts and flushes for region at 1731668590385Disabling writes for close at 1731668590385Obtaining lock to block concurrent updates at 1731668590385Preparing flush snapshotting stores in 2cd824b6831acfe6688b4305519b00a1 at 1731668590385Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731668590385Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1. 
at 1731668590386 (+1 ms)Flushing 2cd824b6831acfe6688b4305519b00a1/cf: creating writer at 1731668590386Flushing 2cd824b6831acfe6688b4305519b00a1/cf: appending metadata at 1731668590405 (+19 ms)Flushing 2cd824b6831acfe6688b4305519b00a1/cf: closing flushed file at 1731668590405Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e11c18c: reopening flushed file at 1731668590438 (+33 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2cd824b6831acfe6688b4305519b00a1 in 63ms, sequenceid=5, compaction requested=false at 1731668590448 (+10 ms)Writing region close event to WAL at 1731668590449 (+1 ms)Running coprocessor post-close hooks at 1731668590453 (+4 ms)Closed at 1731668590454 (+1 ms) 2024-11-15T11:03:10,455 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed 441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:10,456 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=441a0925a8a77248db843c908bcbfd6b, regionState=CLOSED 2024-11-15T11:03:10,456 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed 2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:10,457 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=2cd824b6831acfe6688b4305519b00a1, regionState=CLOSED 2024-11-15T11:03:10,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:10,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:10,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=112 2024-11-15T11:03:10,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 441a0925a8a77248db843c908bcbfd6b, server=4cdab6d79b20,45865,1731668473964 in 227 msec 2024-11-15T11:03:10,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=111 2024-11-15T11:03:10,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure 2cd824b6831acfe6688b4305519b00a1, server=4cdab6d79b20,39207,1731668474152 in 228 msec 2024-11-15T11:03:10,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=441a0925a8a77248db843c908bcbfd6b, UNASSIGN in 236 msec 2024-11-15T11:03:10,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-15T11:03:10,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
region=2cd824b6831acfe6688b4305519b00a1, UNASSIGN in 236 msec 2024-11-15T11:03:10,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742100_1276 (size=84) 2024-11-15T11:03:10,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742100_1276 (size=84) 2024-11-15T11:03:10,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742100_1276 (size=84) 2024-11-15T11:03:10,483 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:10,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742101_1277 (size=20) 2024-11-15T11:03:10,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742101_1277 (size=20) 2024-11-15T11:03:10,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742101_1277 (size=20) 2024-11-15T11:03:10,496 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:10,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742102_1278 (size=21) 2024-11-15T11:03:10,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742102_1278 (size=21) 2024-11-15T11:03:10,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742102_1278 (size=21) 2024-11-15T11:03:10,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742103_1279 (size=84) 2024-11-15T11:03:10,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742103_1279 (size=84) 2024-11-15T11:03:10,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742103_1279 (size=84) 2024-11-15T11:03:10,512 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:10,524 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-15T11:03:10,527 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589554.2cd824b6831acfe6688b4305519b00a1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:10,527 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731668589554.441a0925a8a77248db843c908bcbfd6b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:10,527 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:10,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T11:03:10,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, ASSIGN}] 2024-11-15T11:03:10,550 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, ASSIGN 2024-11-15T11:03:10,551 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, ASSIGN; state=MERGED, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:10,701 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-15T11:03:10,701 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=769c6b89fbcdc818cf21aa5f370a5f6e, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:10,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, ASSIGN because future has completed 2024-11-15T11:03:10,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T11:03:10,859 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 
2024-11-15T11:03:10,859 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 769c6b89fbcdc818cf21aa5f370a5f6e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.', STARTKEY => '', ENDKEY => ''} 2024-11-15T11:03:10,859 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. service=AccessControlService 2024-11-15T11:03:10,859 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:10,860 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,860 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:10,860 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,860 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,861 INFO [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,862 INFO [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 769c6b89fbcdc818cf21aa5f370a5f6e columnFamilyName cf 2024-11-15T11:03:10,862 DEBUG [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:10,882 DEBUG [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/0a7ebcf6451347bb8241286c67366120.441a0925a8a77248db843c908bcbfd6b->hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120-top 2024-11-15T11:03:10,887 DEBUG [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/b3860e202c3b4833be61af85c77a6e54.2cd824b6831acfe6688b4305519b00a1->hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54-top 2024-11-15T11:03:10,888 INFO [StoreOpener-769c6b89fbcdc818cf21aa5f370a5f6e-1 {}] regionserver.HStore(327): Store=769c6b89fbcdc818cf21aa5f370a5f6e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:10,888 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,888 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,889 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,890 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,890 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,891 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,892 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 769c6b89fbcdc818cf21aa5f370a5f6e; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71672393, jitterRate=0.0680018812417984}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:10,892 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:10,893 DEBUG 
[RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 769c6b89fbcdc818cf21aa5f370a5f6e: Running coprocessor pre-open hook at 1731668590860Writing region info on filesystem at 1731668590860Initializing all the Stores at 1731668590861 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668590861Cleaning up temporary data from old regions at 1731668590890 (+29 ms)Running coprocessor post-open hooks at 1731668590892 (+2 ms)Region opened successfully at 1731668590893 (+1 ms) 2024-11-15T11:03:10,893 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e., pid=116, masterSystemTime=1731668590855 2024-11-15T11:03:10,894 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.,because compaction is disabled. 2024-11-15T11:03:10,896 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:10,896 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 
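The records above trace the server side of a forced region merge: the two parent regions are unassigned, the merge metadata is written to hbase:meta, and the merged region 769c6b89fbcdc818cf21aa5f370a5f6e is assigned and opened before the parent MergeTableRegionsProcedure (pid=110, below) finishes. A minimal client-side sketch of how such a merge is typically requested is given here; the connection setup and the choice of which two regions to merge are illustrative assumptions, not read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

import java.util.List;

public class MergeRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      // Assumption: the table has (at least) two adjacent regions; pick the first two.
      List<RegionInfo> regions = admin.getRegions(table);
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // forcible=true mirrors the "force=true" on the MergeTableRegionsProcedure in the log.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}
```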
2024-11-15T11:03:10,897 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=769c6b89fbcdc818cf21aa5f370a5f6e, regionState=OPEN, openSeqNum=9, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:10,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:10,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-15T11:03:10,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152 in 196 msec 2024-11-15T11:03:10,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-15T11:03:10,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, ASSIGN in 354 msec 2024-11-15T11:03:10,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2cd824b6831acfe6688b4305519b00a1, 441a0925a8a77248db843c908bcbfd6b], force=true in 694 msec 2024-11-15T11:03:11,131 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0005_000001 (auth:SIMPLE) from 127.0.0.1:39056 2024-11-15T11:03:11,140 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000001/launch_container.sh] 2024-11-15T11:03:11,141 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000001/container_tokens] 2024-11-15T11:03:11,141 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0005/container_1731668480794_0005_01_000001/sysfs] 2024-11-15T11:03:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T11:03:11,355 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T11:03:11,356 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-15T11:03:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668591356 (current time:1731668591356). 2024-11-15T11:03:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-15T11:03:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ca5aaeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:11,358 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18524132, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:11,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,359 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:11,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@738cdaa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:11,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:11,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:11,362 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:11,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 2024-11-15T11:03:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,363 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6376d341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:11,365 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:11,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:11,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:11,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67430ea9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:11,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:11,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,366 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:11,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2c0d95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:11,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:11,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:11,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:03:11,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:11,372 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 2024-11-15T11:03:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:11,372 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-15T11:03:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:03:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-15T11:03:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-15T11:03:11,375 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T11:03:11,376 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:11,379 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:11,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742104_1280 (size=216) 2024-11-15T11:03:11,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742104_1280 (size=216) 2024-11-15T11:03:11,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742104_1280 (size=216) 2024-11-15T11:03:11,388 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:11,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
769c6b89fbcdc818cf21aa5f370a5f6e}] 2024-11-15T11:03:11,389 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T11:03:11,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-15T11:03:11,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:11,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 769c6b89fbcdc818cf21aa5f370a5f6e: 2024-11-15T11:03:11,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-15T11:03:11,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:11,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/0a7ebcf6451347bb8241286c67366120.441a0925a8a77248db843c908bcbfd6b->hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120-top, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/b3860e202c3b4833be61af85c77a6e54.2cd824b6831acfe6688b4305519b00a1->hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54-top] hfiles 2024-11-15T11:03:11,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/0a7ebcf6451347bb8241286c67366120.441a0925a8a77248db843c908bcbfd6b for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/b3860e202c3b4833be61af85c77a6e54.2cd824b6831acfe6688b4305519b00a1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742105_1281 (size=269) 2024-11-15T11:03:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742105_1281 (size=269) 2024-11-15T11:03:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742105_1281 (size=269) 2024-11-15T11:03:11,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:11,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-15T11:03:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-15T11:03:11,552 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:11,552 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:11,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-15T11:03:11,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e in 165 msec 2024-11-15T11:03:11,555 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:11,556 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:11,556 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:11,556 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,557 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742106_1282 (size=670) 2024-11-15T11:03:11,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742106_1282 (size=670) 2024-11-15T11:03:11,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742106_1282 (size=670) 2024-11-15T11:03:11,567 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:11,572 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:11,573 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,576 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:11,576 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-15T11:03:11,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 203 msec 2024-11-15T11:03:11,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T11:03:11,695 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T11:03:11,696 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696 2024-11-15T11:03:11,696 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:11,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:11,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,729 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
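At this point the FLUSH snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 has completed and ExportSnapshot starts verifying it and copying its manifest to the export destination. A rough sketch of the corresponding client calls follows, assuming a local Configuration, an in-process ToolRunner invocation of ExportSnapshot (it is more commonly run from the hbase command line), and a placeholder destination URI; none of these are read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class SnapshotAndExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Take the flush-type snapshot that the SnapshotProcedure above materializes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
    // Copy the snapshot (manifest plus referenced hfiles) to another directory/filesystem.
    // The -copy-to URI below is a placeholder, not the path used in this run.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:37487/user/jenkins/export-test/export-dir"
    });
    System.exit(rc);
  }
}
```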
2024-11-15T11:03:11,735 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742108_1284 (size=670) 2024-11-15T11:03:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742108_1284 (size=670) 2024-11-15T11:03:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742108_1284 (size=670) 2024-11-15T11:03:11,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742107_1283 (size=216) 2024-11-15T11:03:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742107_1283 (size=216) 2024-11-15T11:03:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742107_1283 (size=216) 2024-11-15T11:03:11,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:11,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:11,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:11,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T11:03:12,525 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:03:12,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-14278455991118720121.jar 2024-11-15T11:03:12,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,909 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-3550467781197677216.jar 2024-11-15T11:03:12,910 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,910 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,910 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:12,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:03:12,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:03:12,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:03:12,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:03:12,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:03:12,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:03:12,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:03:12,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:03:12,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:03:12,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:03:12,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:03:12,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:12,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-15T11:03:12,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:12,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:12,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:12,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:12,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:12,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742109_1285 (size=131440) 2024-11-15T11:03:12,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742109_1285 (size=131440) 2024-11-15T11:03:12,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742109_1285 (size=131440) 2024-11-15T11:03:13,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T11:03:13,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T11:03:13,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T11:03:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T11:03:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T11:03:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T11:03:13,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742112_1288 (size=903737) 2024-11-15T11:03:13,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742112_1288 (size=903737) 
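The "For class ..., using jar ..." records come from TableMapReduceUtil resolving, for each class the export job needs, the jar that provides it so that jar can be shipped with the MapReduce job; the surrounding addStoredBlock records are those jars being written into HDFS for the job's distributed cache. A minimal sketch of that dependency-shipping step is shown below, with a hypothetical job name; it is an illustration of the API, not the exact call path used by ExportSnapshot here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical job name; any MapReduce job touching HBase classes needs the same step.
    Job job = Job.getInstance(conf, "export-snapshot-sketch");
    // Finds the jar backing each required HBase/ZooKeeper/thirdparty class (as logged above)
    // and adds it to the job's tmpjars so task JVMs can load those classes.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```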
2024-11-15T11:03:13,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742112_1288 (size=903737) 2024-11-15T11:03:13,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T11:03:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T11:03:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T11:03:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742114_1290 (size=440656) 2024-11-15T11:03:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742114_1290 (size=440656) 2024-11-15T11:03:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742114_1290 (size=440656) 2024-11-15T11:03:13,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742115_1291 (size=1877034) 2024-11-15T11:03:13,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742115_1291 (size=1877034) 2024-11-15T11:03:13,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742115_1291 (size=1877034) 2024-11-15T11:03:13,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742116_1292 (size=77835) 2024-11-15T11:03:13,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742116_1292 (size=77835) 2024-11-15T11:03:13,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742116_1292 (size=77835) 2024-11-15T11:03:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742117_1293 (size=30949) 2024-11-15T11:03:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742117_1293 (size=30949) 2024-11-15T11:03:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742117_1293 (size=30949) 2024-11-15T11:03:13,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742118_1294 (size=1597327) 2024-11-15T11:03:13,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742118_1294 (size=1597327) 2024-11-15T11:03:13,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742118_1294 (size=1597327) 2024-11-15T11:03:13,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742119_1295 
(size=6424745) 2024-11-15T11:03:13,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742119_1295 (size=6424745) 2024-11-15T11:03:13,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742119_1295 (size=6424745) 2024-11-15T11:03:13,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742120_1296 (size=4695811) 2024-11-15T11:03:13,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742120_1296 (size=4695811) 2024-11-15T11:03:13,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742120_1296 (size=4695811) 2024-11-15T11:03:13,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742121_1297 (size=232957) 2024-11-15T11:03:13,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742121_1297 (size=232957) 2024-11-15T11:03:13,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742121_1297 (size=232957) 2024-11-15T11:03:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742122_1298 (size=127628) 2024-11-15T11:03:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742122_1298 (size=127628) 2024-11-15T11:03:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742122_1298 (size=127628) 2024-11-15T11:03:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742123_1299 (size=20406) 2024-11-15T11:03:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742123_1299 (size=20406) 2024-11-15T11:03:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742123_1299 (size=20406) 2024-11-15T11:03:13,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742124_1300 (size=5175431) 2024-11-15T11:03:13,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742124_1300 (size=5175431) 2024-11-15T11:03:13,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742124_1300 (size=5175431) 2024-11-15T11:03:13,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742125_1301 (size=217634) 2024-11-15T11:03:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742125_1301 (size=217634) 2024-11-15T11:03:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to 
blk_1073742125_1301 (size=217634) 2024-11-15T11:03:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T11:03:13,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T11:03:13,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T11:03:13,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742127_1303 (size=322274) 2024-11-15T11:03:13,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742127_1303 (size=322274) 2024-11-15T11:03:13,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742127_1303 (size=322274) 2024-11-15T11:03:13,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742128_1304 (size=503880) 2024-11-15T11:03:13,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742128_1304 (size=503880) 2024-11-15T11:03:13,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742128_1304 (size=503880) 2024-11-15T11:03:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742129_1305 (size=29229) 2024-11-15T11:03:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742129_1305 (size=29229) 2024-11-15T11:03:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742129_1305 (size=29229) 2024-11-15T11:03:13,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742130_1306 (size=24096) 2024-11-15T11:03:13,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742130_1306 (size=24096) 2024-11-15T11:03:13,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742130_1306 (size=24096) 2024-11-15T11:03:13,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742131_1307 (size=111872) 2024-11-15T11:03:13,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742131_1307 (size=111872) 2024-11-15T11:03:13,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742131_1307 (size=111872) 2024-11-15T11:03:13,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742132_1308 (size=45609) 2024-11-15T11:03:13,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added 
to blk_1073742132_1308 (size=45609) 2024-11-15T11:03:13,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742132_1308 (size=45609) 2024-11-15T11:03:13,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742133_1309 (size=136454) 2024-11-15T11:03:13,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742133_1309 (size=136454) 2024-11-15T11:03:13,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742133_1309 (size=136454) 2024-11-15T11:03:13,272 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T11:03:13,274 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-15T11:03:13,276 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-15T11:03:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742134_1310 (size=378) 2024-11-15T11:03:13,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742134_1310 (size=378) 2024-11-15T11:03:13,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742134_1310 (size=378) 2024-11-15T11:03:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742135_1311 (size=15) 2024-11-15T11:03:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742135_1311 (size=15) 2024-11-15T11:03:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742135_1311 (size=15) 2024-11-15T11:03:13,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742136_1312 (size=303791) 2024-11-15T11:03:13,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742136_1312 (size=303791) 2024-11-15T11:03:13,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742136_1312 (size=303791) 2024-11-15T11:03:13,315 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:03:13,315 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:03:13,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:13,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-15T11:03:13,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:13,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-15T11:03:13,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-15T11:03:13,771 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0006_000001 (auth:SIMPLE) from 127.0.0.1:38046 2024-11-15T11:03:15,751 WARN [regionserver/4cdab6d79b20:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-11-15T11:03:18,882 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0006_000001 (auth:SIMPLE) from 127.0.0.1:49040 2024-11-15T11:03:19,091 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:03:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742137_1313 (size=349441) 2024-11-15T11:03:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742137_1313 (size=349441) 2024-11-15T11:03:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742137_1313 (size=349441) 2024-11-15T11:03:19,853 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5cc39749fcf86bff70bcc4547b5b621d changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:03:19,854 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f9a2c32652deb65138d9f04999d3eee3 changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:03:21,257 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0006_000001 (auth:SIMPLE) from 127.0.0.1:57168 2024-11-15T11:03:25,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742138_1314 (size=4945) 2024-11-15T11:03:25,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742138_1314 (size=4945) 2024-11-15T11:03:25,570 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742138_1314 (size=4945) 2024-11-15T11:03:25,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742139_1315 (size=4945) 2024-11-15T11:03:25,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742139_1315 (size=4945) 2024-11-15T11:03:25,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742139_1315 (size=4945) 2024-11-15T11:03:25,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742140_1316 (size=17474) 2024-11-15T11:03:25,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742140_1316 (size=17474) 2024-11-15T11:03:25,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742140_1316 (size=17474) 2024-11-15T11:03:25,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742141_1317 (size=482) 2024-11-15T11:03:25,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742141_1317 (size=482) 2024-11-15T11:03:25,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742141_1317 (size=482) 2024-11-15T11:03:25,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742142_1318 (size=17474) 2024-11-15T11:03:25,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742142_1318 (size=17474) 2024-11-15T11:03:25,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742142_1318 (size=17474) 2024-11-15T11:03:25,757 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000002/launch_container.sh] 2024-11-15T11:03:25,758 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000002/container_tokens] 2024-11-15T11:03:25,758 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000002/sysfs] 2024-11-15T11:03:25,760 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742143_1319 (size=349441) 2024-11-15T11:03:25,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742143_1319 (size=349441) 2024-11-15T11:03:25,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742143_1319 (size=349441) 2024-11-15T11:03:25,776 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0006_000001 (auth:SIMPLE) from 127.0.0.1:57184 2024-11-15T11:03:27,675 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:03:27,676 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-15T11:03:27,691 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:27,691 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:03:27,691 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:03:27,692 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:27,692 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-15T11:03:27,692 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-15T11:03:27,692 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:27,693 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-15T11:03:27,693 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668591696/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-15T11:03:27,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$13(2820): 
Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:27,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:27,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T11:03:27,703 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668607703"}]},"ts":"1731668607703"} 2024-11-15T11:03:27,705 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-15T11:03:27,705 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-15T11:03:27,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-15T11:03:27,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, UNASSIGN}] 2024-11-15T11:03:27,708 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, UNASSIGN 2024-11-15T11:03:27,709 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=769c6b89fbcdc818cf21aa5f370a5f6e, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:27,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, UNASSIGN because future has completed 2024-11-15T11:03:27,710 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:27,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:27,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T11:03:27,863 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:27,863 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] 
handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:27,864 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 769c6b89fbcdc818cf21aa5f370a5f6e, disabling compactions & flushes 2024-11-15T11:03:27,864 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:27,864 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:27,864 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. after waiting 0 ms 2024-11-15T11:03:27,864 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 2024-11-15T11:03:27,870 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-15T11:03:27,871 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:27,871 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e. 
2024-11-15T11:03:27,871 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 769c6b89fbcdc818cf21aa5f370a5f6e: Waiting for close lock at 1731668607864Running coprocessor pre-close hooks at 1731668607864Disabling compacts and flushes for region at 1731668607864Disabling writes for close at 1731668607864Writing region close event to WAL at 1731668607865 (+1 ms)Running coprocessor post-close hooks at 1731668607871 (+6 ms)Closed at 1731668607871 2024-11-15T11:03:27,873 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:27,873 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=769c6b89fbcdc818cf21aa5f370a5f6e, regionState=CLOSED 2024-11-15T11:03:27,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:27,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-15T11:03:27,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 769c6b89fbcdc818cf21aa5f370a5f6e, server=4cdab6d79b20,39207,1731668474152 in 168 msec 2024-11-15T11:03:27,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-15T11:03:27,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=769c6b89fbcdc818cf21aa5f370a5f6e, UNASSIGN in 173 msec 2024-11-15T11:03:27,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-11-15T11:03:27,885 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 178 msec 2024-11-15T11:03:27,887 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668607887"}]},"ts":"1731668607887"} 2024-11-15T11:03:27,890 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-15T11:03:27,890 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-15T11:03:27,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 192 msec 2024-11-15T11:03:28,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T11:03:28,015 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T11:03:28,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,018 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,018 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,021 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,022 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:28,022 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:28,022 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:28,024 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/recovered.edits] 2024-11-15T11:03:28,024 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/recovered.edits] 2024-11-15T11:03:28,024 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/recovered.edits] 2024-11-15T11:03:28,029 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/cf/0a7ebcf6451347bb8241286c67366120 2024-11-15T11:03:28,029 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/cf/b3860e202c3b4833be61af85c77a6e54 2024-11-15T11:03:28,030 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/0a7ebcf6451347bb8241286c67366120.441a0925a8a77248db843c908bcbfd6b to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/0a7ebcf6451347bb8241286c67366120.441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:28,032 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/b3860e202c3b4833be61af85c77a6e54.2cd824b6831acfe6688b4305519b00a1 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/cf/b3860e202c3b4833be61af85c77a6e54.2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T11:03:28,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T11:03:28,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T11:03:28,033 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/recovered.edits/8.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b/recovered.edits/8.seqid 2024-11-15T11:03:28,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T11:03:28,034 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/441a0925a8a77248db843c908bcbfd6b 2024-11-15T11:03:28,034 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/recovered.edits/8.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1/recovered.edits/8.seqid 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-15T11:03:28,036 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:28,038 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:28,038 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2cd824b6831acfe6688b4305519b00a1 2024-11-15T11:03:28,038 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:28,038 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:28,042 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/recovered.edits/12.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e/recovered.edits/12.seqid 2024-11-15T11:03:28,042 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/769c6b89fbcdc818cf21aa5f370a5f6e 2024-11-15T11:03:28,042 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-15T11:03:28,045 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,049 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-15T11:03:28,051 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-15T11:03:28,053 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,053 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-15T11:03:28,053 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668608053"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:28,055 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-15T11:03:28,055 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 769c6b89fbcdc818cf21aa5f370a5f6e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e.', STARTKEY => '', ENDKEY => ''}] 2024-11-15T11:03:28,055 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-11-15T11:03:28,055 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668608055"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:28,058 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-15T11:03:28,059 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 43 msec 2024-11-15T11:03:28,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-15T11:03:28,146 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,146 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T11:03:28,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T11:03:28,150 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668608150"}]},"ts":"1731668608150"} 2024-11-15T11:03:28,153 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-15T11:03:28,153 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-15T11:03:28,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-15T11:03:28,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, UNASSIGN}] 2024-11-15T11:03:28,157 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, UNASSIGN 2024-11-15T11:03:28,157 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, UNASSIGN 2024-11-15T11:03:28,158 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=5cc39749fcf86bff70bcc4547b5b621d, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:03:28,158 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=f9a2c32652deb65138d9f04999d3eee3, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:28,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, UNASSIGN because future has completed 2024-11-15T11:03:28,162 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:28,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:03:28,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, UNASSIGN because future has completed 2024-11-15T11:03:28,164 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:28,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:28,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T11:03:28,316 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:28,316 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:28,316 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 5cc39749fcf86bff70bcc4547b5b621d, disabling compactions & flushes 2024-11-15T11:03:28,316 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. after waiting 0 ms 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:28,317 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing f9a2c32652deb65138d9f04999d3eee3, disabling compactions & flushes 2024-11-15T11:03:28,317 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. after waiting 0 ms 2024-11-15T11:03:28,317 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 
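
The close entries above repeatedly log a "Time limited wait for close lock" followed by "Acquired close lock ... after waiting 0 ms". As an illustration of that pattern only (this is not HRegion's code; the method name and timeout are assumptions), the same behaviour can be sketched with a plain java.util.concurrent read/write lock acquired under a deadline:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TimedCloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

  // Hypothetical close(): waits up to timeoutMs for the exclusive lock,
  // mirroring the "time limited wait for close lock" entries in the log above.
  boolean close(long timeoutMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
      return false; // could not obtain the close lock within the deadline
    }
    try {
      long waited = System.currentTimeMillis() - start;
      System.out.println("Acquired close lock after waiting " + waited + " ms");
      // ... disable writes, flush, write the region close event to the WAL ...
      return true;
    } finally {
      closeLock.writeLock().unlock();
    }
  }
}
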
2024-11-15T11:03:28,326 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:28,327 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:28,327 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:28,327 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d. 2024-11-15T11:03:28,327 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 5cc39749fcf86bff70bcc4547b5b621d: Waiting for close lock at 1731668608316Running coprocessor pre-close hooks at 1731668608316Disabling compacts and flushes for region at 1731668608316Disabling writes for close at 1731668608317 (+1 ms)Writing region close event to WAL at 1731668608318 (+1 ms)Running coprocessor post-close hooks at 1731668608327 (+9 ms)Closed at 1731668608327 2024-11-15T11:03:28,328 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:28,328 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3. 
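
The pid=128/129 entries above show the two regions of testtb-testExportFileSystemStateWithMergeRegion being closed on their region servers as children of the DisableTableProcedure (pid=124). A minimal client-side sketch of the call that starts such a flow, using the public HBase Admin API; the connection setup is an assumption and is not taken from the test harness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    // Assumed cluster configuration; the test uses its own mini-cluster setup.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      if (admin.isTableEnabled(table)) {
        // Triggers a DisableTableProcedure on the master, which unassigns and
        // closes each region, producing entries like the ones logged above.
        admin.disableTable(table);
      }
    }
  }
}
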
2024-11-15T11:03:28,328 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for f9a2c32652deb65138d9f04999d3eee3: Waiting for close lock at 1731668608317Running coprocessor pre-close hooks at 1731668608317Disabling compacts and flushes for region at 1731668608317Disabling writes for close at 1731668608317Writing region close event to WAL at 1731668608319 (+2 ms)Running coprocessor post-close hooks at 1731668608328 (+9 ms)Closed at 1731668608328 2024-11-15T11:03:28,330 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:28,330 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=5cc39749fcf86bff70bcc4547b5b621d, regionState=CLOSED 2024-11-15T11:03:28,330 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:28,331 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=f9a2c32652deb65138d9f04999d3eee3, regionState=CLOSED 2024-11-15T11:03:28,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:03:28,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:28,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-15T11:03:28,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; CloseRegionProcedure 5cc39749fcf86bff70bcc4547b5b621d, server=4cdab6d79b20,36113,1731668474097 in 172 msec 2024-11-15T11:03:28,338 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-15T11:03:28,338 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure f9a2c32652deb65138d9f04999d3eee3, server=4cdab6d79b20,45865,1731668473964 in 171 msec 2024-11-15T11:03:28,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5cc39749fcf86bff70bcc4547b5b621d, UNASSIGN in 182 msec 2024-11-15T11:03:28,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-15T11:03:28,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f9a2c32652deb65138d9f04999d3eee3, UNASSIGN in 183 msec 2024-11-15T11:03:28,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-15T11:03:28,344 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 188 msec 2024-11-15T11:03:28,345 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668608345"}]},"ts":"1731668608345"} 2024-11-15T11:03:28,348 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-15T11:03:28,348 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-15T11:03:28,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 202 msec 2024-11-15T11:03:28,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T11:03:28,466 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T11:03:28,467 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,469 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,470 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,473 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,477 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:28,477 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3 2024-11-15T11:03:28,480 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/recovered.edits] 2024-11-15T11:03:28,480 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/recovered.edits] 2024-11-15T11:03:28,484 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5 2024-11-15T11:03:28,490 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d/recovered.edits/9.seqid 2024-11-15T11:03:28,490 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/5cc39749fcf86bff70bcc4547b5b621d 2024-11-15T11:03:28,492 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/cf/2eadf01a4d074d988349d4805888fb49 2024-11-15T11:03:28,495 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3/recovered.edits/9.seqid 2024-11-15T11:03:28,496 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithMergeRegion/f9a2c32652deb65138d9f04999d3eee3 
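
The HFileArchiver entries above show each store file and recovered.edits file being moved from the table's directory under data/ to the mirror-image location under archive/data/ before the region directory itself is deleted. The following path mapping is an illustration of that layout only, not HBase's implementation; the helper name is hypothetical and the sample paths are copied from the log:

public final class ArchivePathSketch {
  // Illustrative only: rewrites <root>/data/<rest> to <root>/archive/data/<rest>,
  // matching the source/destination pairs reported by HFileArchiver above.
  static String toArchivePath(String rootDir, String filePath) {
    String dataPrefix = rootDir + "/data/";
    if (!filePath.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("not under " + dataPrefix + ": " + filePath);
    }
    return rootDir + "/archive/data/" + filePath.substring(dataPrefix.length());
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7";
    String storeFile = root + "/data/default/testtb-testExportFileSystemStateWithMergeRegion/"
        + "5cc39749fcf86bff70bcc4547b5b621d/cf/b00b7a7f8c024d2c8406f445a733dac5";
    // Prints the archive/ destination that the HFileArchiver(596) line reports.
    System.out.println(toArchivePath(root, storeFile));
  }
}
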
2024-11-15T11:03:28,496 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-15T11:03:28,498 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,501 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-15T11:03:28,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T11:03:28,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T11:03:28,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T11:03:28,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T11:03:28,541 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-15T11:03:28,543 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,543 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
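
The ZKWatcher entries above show the master and every region server receiving a NodeDataChanged event on /hbase/acl/testtb-testExportFileSystemStateWithMergeRegion when the table's permissions are cleared. A generic ZooKeeper-client sketch of registering such a watch follows; it stands in for HBase's ZKPermissionWatcher rather than reproducing it, the quorum address and znode path are copied from the log, and the session timeout is an assumption:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.ZooKeeper;

public class AclZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch done = new CountDownLatch(1);
    // Quorum taken from the log; 30s session timeout is an assumption.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55607", 30000, event -> { });
    String path = "/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion";
    // Registers a one-shot watch; NodeDataChanged / NodeDeleted events like the
    // ones logged above are delivered to this callback when the node changes.
    zk.exists(path, event -> {
      System.out.println("event " + event.getType() + " on " + event.getPath());
      done.countDown();
    });
    done.await();
    zk.close();
  }
}
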
2024-11-15T11:03:28,543 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668608543"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:28,543 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668608543"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:28,545 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:03:28,545 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5cc39749fcf86bff70bcc4547b5b621d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731668587334.5cc39749fcf86bff70bcc4547b5b621d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f9a2c32652deb65138d9f04999d3eee3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731668587334.f9a2c32652deb65138d9f04999d3eee3.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:03:28,545 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-15T11:03:28,546 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668608545"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:28,548 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-15T11:03:28,549 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 82 msec 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:28,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-15T11:03:28,563 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,564 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T11:03:28,574 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-15T11:03:28,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,577 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-15T11:03:28,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:28,582 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-15T11:03:28,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:28,610 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=801 (was 789) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 32213) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/4cdab6d79b20:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4508 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:45424 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:33996 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:35953 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:53302 [Waiting for 
operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_937203006_1 at /127.0.0.1:45406 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=808 (was 795) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=464 (was 518), ProcessCount=22 (was 22), AvailableMemoryMB=504 (was 663) 2024-11-15T11:03:28,610 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-15T11:03:28,636 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=801, OpenFileDescriptor=808, MaxFileDescriptor=1048576, SystemLoadAverage=464, ProcessCount=22, AvailableMemoryMB=497 2024-11-15T11:03:28,636 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-15T11:03:28,637 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:28,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:28,639 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:28,640 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:28,640 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-15T11:03:28,641 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:28,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T11:03:28,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742144_1320 (size=407) 2024-11-15T11:03:28,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742144_1320 (size=407) 2024-11-15T11:03:28,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742144_1320 (size=407) 2024-11-15T11:03:28,653 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 789045c0ac6d08809a2f0b884aa7c9dd, NAME => 'testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:28,653 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1ee72a79fecd541294e0827d4a8f7fba, NAME => 'testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:28,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742145_1321 (size=68) 2024-11-15T11:03:28,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742145_1321 (size=68) 2024-11-15T11:03:28,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742145_1321 (size=68) 2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 1ee72a79fecd541294e0827d4a8f7fba, disabling compactions & flushes 2024-11-15T11:03:28,675 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. after waiting 0 ms 2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:28,675 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 
2024-11-15T11:03:28,675 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1ee72a79fecd541294e0827d4a8f7fba: Waiting for close lock at 1731668608675Disabling compacts and flushes for region at 1731668608675Disabling writes for close at 1731668608675Writing region close event to WAL at 1731668608675Closed at 1731668608675 2024-11-15T11:03:28,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742146_1322 (size=68) 2024-11-15T11:03:28,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742146_1322 (size=68) 2024-11-15T11:03:28,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742146_1322 (size=68) 2024-11-15T11:03:28,680 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:28,680 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 789045c0ac6d08809a2f0b884aa7c9dd, disabling compactions & flushes 2024-11-15T11:03:28,680 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:28,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:28,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. after waiting 0 ms 2024-11-15T11:03:28,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:28,681 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
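
The CreateTableProcedure entries above show testtb-testExportExpiredSnapshot being laid out with a single family 'cf' (VERSIONS => '1') and two regions split at row key '1'. A hedged client-side sketch of an equivalent create request is below; it only sets the family name, max versions and the split point, leaving the remaining attributes shown in the log at their defaults, and the connection setup is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // One split key at '1' yields the two regions ('' -> '1' and '1' -> '') created above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
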
2024-11-15T11:03:28,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 789045c0ac6d08809a2f0b884aa7c9dd: Waiting for close lock at 1731668608680Disabling compacts and flushes for region at 1731668608680Disabling writes for close at 1731668608681 (+1 ms)Writing region close event to WAL at 1731668608681Closed at 1731668608681 2024-11-15T11:03:28,682 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:28,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731668608682"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668608682"}]},"ts":"1731668608682"} 2024-11-15T11:03:28,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731668608682"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668608682"}]},"ts":"1731668608682"} 2024-11-15T11:03:28,686 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:28,687 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:28,687 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668608687"}]},"ts":"1731668608687"} 2024-11-15T11:03:28,689 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-15T11:03:28,690 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:28,691 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:28,691 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:28,691 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:28,691 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:28,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, ASSIGN}] 2024-11-15T11:03:28,694 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, ASSIGN 2024-11-15T11:03:28,694 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, ASSIGN 2024-11-15T11:03:28,695 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:28,695 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:03:28,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T11:03:28,846 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:03:28,846 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1ee72a79fecd541294e0827d4a8f7fba, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:28,846 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=789045c0ac6d08809a2f0b884aa7c9dd, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:28,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, ASSIGN because future has completed 2024-11-15T11:03:28,849 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:28,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, ASSIGN because future has completed 2024-11-15T11:03:28,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:28,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T11:03:29,005 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:29,005 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ee72a79fecd541294e0827d4a8f7fba, NAME => 'testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:03:29,005 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:29,005 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. service=AccessControlService 2024-11-15T11:03:29,005 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 789045c0ac6d08809a2f0b884aa7c9dd, NAME => 'testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:03:29,005 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
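The two OpenRegionProcedures dispatched above (pid=134 and pid=135) leave one region of testtb-testExportExpiredSnapshot on each of the two region servers. From a client, that resulting layout can be inspected with a RegionLocator; the following is a minimal illustrative sketch against the standard HBase 2.x client API, not code from the test run that produced this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          // Prints one line per region: encoded region name plus the server it is assigned to.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
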
2024-11-15T11:03:29,005 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. service=AccessControlService 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,006 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,006 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,007 INFO [StoreOpener-1ee72a79fecd541294e0827d4a8f7fba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,008 INFO [StoreOpener-789045c0ac6d08809a2f0b884aa7c9dd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,009 INFO [StoreOpener-1ee72a79fecd541294e0827d4a8f7fba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ee72a79fecd541294e0827d4a8f7fba columnFamilyName cf 2024-11-15T11:03:29,009 INFO [StoreOpener-789045c0ac6d08809a2f0b884aa7c9dd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 789045c0ac6d08809a2f0b884aa7c9dd columnFamilyName cf 2024-11-15T11:03:29,009 DEBUG [StoreOpener-1ee72a79fecd541294e0827d4a8f7fba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:29,009 DEBUG [StoreOpener-789045c0ac6d08809a2f0b884aa7c9dd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:29,010 INFO [StoreOpener-1ee72a79fecd541294e0827d4a8f7fba-1 {}] regionserver.HStore(327): Store=1ee72a79fecd541294e0827d4a8f7fba/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:29,010 INFO [StoreOpener-789045c0ac6d08809a2f0b884aa7c9dd-1 {}] regionserver.HStore(327): Store=789045c0ac6d08809a2f0b884aa7c9dd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:29,010 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,010 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,011 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,011 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba 
2024-11-15T11:03:29,011 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,011 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,012 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,012 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,012 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,012 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,013 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,014 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,016 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:29,017 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 1ee72a79fecd541294e0827d4a8f7fba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74609431, jitterRate=0.11176715791225433}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:29,017 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,018 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 1ee72a79fecd541294e0827d4a8f7fba: Running coprocessor pre-open hook at 1731668609006Writing region info on filesystem at 1731668609006Initializing all the Stores at 1731668609007 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668609007Cleaning up temporary data from old regions at 1731668609012 (+5 ms)Running coprocessor post-open hooks at 1731668609017 (+5 ms)Region opened successfully at 1731668609017 2024-11-15T11:03:29,018 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:29,018 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba., pid=134, masterSystemTime=1731668609001 2024-11-15T11:03:29,018 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 789045c0ac6d08809a2f0b884aa7c9dd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64114737, jitterRate=-0.0446159690618515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:29,019 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,019 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 789045c0ac6d08809a2f0b884aa7c9dd: Running coprocessor pre-open hook at 1731668609006Writing region info on filesystem at 1731668609006Initializing all the Stores at 1731668609007 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668609007Cleaning up temporary data from old regions at 1731668609012 (+5 ms)Running coprocessor post-open hooks at 1731668609019 (+7 ms)Region opened successfully at 1731668609019 2024-11-15T11:03:29,020 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd., pid=135, masterSystemTime=1731668609002 2024-11-15T11:03:29,021 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:29,021 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 
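The column family descriptor echoed in the region open journals above (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, no compression or data block encoding, block cache on) maps roughly onto the following ColumnFamilyDescriptorBuilder calls. This is an illustrative sketch of that mapping, not the test's own table setup; most of these values are the client-side defaults anyway:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilySketch {
      // Mirrors the attributes printed in the open journal for family 'cf'.
      static ColumnFamilyDescriptor cfDescriptor() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                               // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)               // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                         // BLOCKSIZE => '65536 B (64KB)'
            .setCompressionType(Compression.Algorithm.NONE)  // COMPRESSION => 'NONE'
            .setDataBlockEncoding(DataBlockEncoding.NONE)    // DATA_BLOCK_ENCODING => 'NONE'
            .setBlockCacheEnabled(true)                      // BLOCKCACHE => 'true'
            .build();
      }
    }
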
2024-11-15T11:03:29,022 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1ee72a79fecd541294e0827d4a8f7fba, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:29,022 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:29,022 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:29,023 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=789045c0ac6d08809a2f0b884aa7c9dd, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:29,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:29,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:29,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=133 2024-11-15T11:03:29,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152 in 178 msec 2024-11-15T11:03:29,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=132 2024-11-15T11:03:29,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964 in 177 msec 2024-11-15T11:03:29,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, ASSIGN in 338 msec 2024-11-15T11:03:29,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=132, resume processing ppid=131 2024-11-15T11:03:29,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, ASSIGN in 339 msec 2024-11-15T11:03:29,034 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:29,034 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668609034"}]},"ts":"1731668609034"} 
2024-11-15T11:03:29,036 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-15T11:03:29,037 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:29,037 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-15T11:03:29,040 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T11:03:29,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:29,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:29,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:29,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:29,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:29,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:29,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:29,046 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:29,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 408 msec 2024-11-15T11:03:29,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T11:03:29,266 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
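The CREATE operation that just completed (pid=131) produced a two-region table: a single column family 'cf' and one split point at row key '1', giving the regions ['', '1') and ['1', '') seen above. A table with this shape is created from a client roughly as follows; this is an illustrative sketch against the standard Admin API (here using the column family defaults sketched earlier), not the test harness code itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // One split key '1' yields the two regions recorded in hbase:meta above.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              splitKeys);  // synchronous: returns once the CreateTableProcedure finishes
        }
      }
    }
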
2024-11-15T11:03:29,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-15T11:03:29,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:29,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-15T11:03:29,271 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:29,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-15T11:03:29,272 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:29,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T11:03:29,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668609275 (current time:1731668609275). 2024-11-15T11:03:29,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:29,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T11:03:29,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:29,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@780bf944, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:29,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:29,277 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:29,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:29,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:29,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fcfb61f, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:29,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:29,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,279 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:29,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2354993c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:29,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:29,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:29,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36510, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:29,283 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,284 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:29,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47b480a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:29,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:29,285 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:29,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@677dd2a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,287 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:29,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bfe4853, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:29,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:29,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:29,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:29,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,293 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T11:03:29,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
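The request being validated above is the server side of a plain Admin.snapshot() call; for an enabled table the default is a FLUSH-type snapshot, which matches the "{ ss=emptySnaptb0-testExportExpiredSnapshot ... type=FLUSH ttl=0 }" description in these entries. A hedged client-side sketch, assuming an already-open Admin handle as in the earlier connection sketch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class SnapshotSketch {
      static void takeEmptySnapshot(Admin admin) throws Exception {
        // For an enabled table this takes a FLUSH-type snapshot; the call blocks until the
        // master's SnapshotProcedure (SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION) completes.
        admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
            TableName.valueOf("testtb-testExportExpiredSnapshot"));
      }
    }
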
2024-11-15T11:03:29,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T11:03:29,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-15T11:03:29,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:29,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T11:03:29,297 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:29,299 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:29,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742147_1323 (size=170) 2024-11-15T11:03:29,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742147_1323 (size=170) 2024-11-15T11:03:29,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742147_1323 (size=170) 2024-11-15T11:03:29,316 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:29,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba}] 2024-11-15T11:03:29,317 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,317 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,405 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T11:03:29,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-15T11:03:29,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 1ee72a79fecd541294e0827d4a8f7fba: 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for 789045c0ac6d08809a2f0b884aa7c9dd: 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:29,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:29,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:29,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:29,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742149_1325 (size=71) 2024-11-15T11:03:29,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742148_1324 (size=71) 2024-11-15T11:03:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742149_1325 (size=71) 2024-11-15T11:03:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742148_1324 (size=71) 2024-11-15T11:03:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742148_1324 (size=71) 2024-11-15T11:03:29,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:29,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
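Each SnapshotRegionProcedure above stores only the region-info and an empty hfile list, since no data has been flushed yet; the manifest consolidation, verification and the final move out of .hbase-snapshot/.tmp follow in the next entries. Once that completes, the snapshot is visible to clients; an illustrative sketch (again assuming an open Admin handle, and that the SnapshotDescription getters shown are available in this client version):

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      static void printSnapshots(Admin admin) throws Exception {
        // Lists completed snapshots known to the master, including the one taken above.
        for (SnapshotDescription sd : admin.listSnapshots()) {
          System.out.println(sd.getName() + " on " + sd.getTableName());
        }
      }
    }
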
2024-11-15T11:03:29,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-15T11:03:29,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-15T11:03:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742149_1325 (size=71) 2024-11-15T11:03:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-15T11:03:29,484 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-15T11:03:29,484 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,484 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,485 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd in 169 msec 2024-11-15T11:03:29,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=136 2024-11-15T11:03:29,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba in 170 msec 2024-11-15T11:03:29,490 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:29,490 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:29,493 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:29,493 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot 
Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,493 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742150_1326 (size=552) 2024-11-15T11:03:29,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742150_1326 (size=552) 2024-11-15T11:03:29,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742150_1326 (size=552) 2024-11-15T11:03:29,508 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:29,512 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:29,512 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,513 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:29,513 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-15T11:03:29,515 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 220 msec 2024-11-15T11:03:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T11:03:29,616 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T11:03:29,620 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='0d39d6b27eb8fe9caea1e37f955dcfe8c', locateType=CURRENT is 
[region=testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:29,621 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1353aa66b5a1173c6b0f76c2673e92456', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,623 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='27347c3a41193d9f1eebdbe4a7f2ada52', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,624 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='322749816258be60bf8b6a42ede0e3a40', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,625 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4365f5e4c70a3022b6b1f1195d94bfb5d', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:29,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:29,654 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:29,658 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-15T11:03:29,658 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
2024-11-15T11:03:29,659 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:29,661 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:29,667 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:29,674 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:29,677 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T11:03:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668609677 (current time:1731668609677). 2024-11-15T11:03:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T11:03:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@101bc341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:29,679 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dfe58fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:29,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,681 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:29,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560a120d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:29,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:29,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:29,684 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:29,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:29,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T11:03:29,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T11:03:29,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T11:03:29,686 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-15T11:03:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68746523, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-15T11:03:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id
2024-11-15T11:03:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-15T11:03:29,688 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc'
2024-11-15T11:03:29,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-15T11:03:29,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc"
2024-11-15T11:03:29,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@656f13a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-15T11:03:29,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:29,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:29,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:29,690 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:29,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34e1b3c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:29,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:29,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:29,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:29,694 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:29,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:29,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T11:03:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T11:03:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T11:03:29,698 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-15T11:03:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-11-15T11:03:29,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-11-15T11:03:29,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T11:03:29,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-15T11:03:29,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T11:03:29,701 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:29,702 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:29,706 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742151_1327 (size=165) 2024-11-15T11:03:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742151_1327 (size=165) 2024-11-15T11:03:29,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742151_1327 (size=165) 2024-11-15T11:03:29,725 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:29,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba}] 2024-11-15T11:03:29,726 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,726 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T11:03:29,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-15T11:03:29,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-15T11:03:29,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:29,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:29,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing 789045c0ac6d08809a2f0b884aa7c9dd 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-15T11:03:29,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 1ee72a79fecd541294e0827d4a8f7fba 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-15T11:03:29,906 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/.tmp/cf/a45c81fd9a99448d842f38e65df78857 is 71, key is 015375e49a086516084938e83f7c2336/cf:q/1731668609628/Put/seqid=0 2024-11-15T11:03:29,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/.tmp/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b is 71, key is 10a2fe0163876fbe3b909ce1d0d8b1e0/cf:q/1731668609652/Put/seqid=0 2024-11-15T11:03:29,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742152_1328 (size=5568) 2024-11-15T11:03:29,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742152_1328 (size=5568) 2024-11-15T11:03:29,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742152_1328 (size=5568) 2024-11-15T11:03:29,923 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/.tmp/cf/a45c81fd9a99448d842f38e65df78857 2024-11-15T11:03:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742153_1329 (size=8052) 2024-11-15T11:03:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742153_1329 (size=8052) 2024-11-15T11:03:29,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742153_1329 (size=8052) 2024-11-15T11:03:29,928 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/.tmp/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b 2024-11-15T11:03:29,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/.tmp/cf/a45c81fd9a99448d842f38e65df78857 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857 2024-11-15T11:03:29,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/.tmp/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b 2024-11-15T11:03:29,942 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857, entries=7, sequenceid=6, filesize=5.4 K 2024-11-15T11:03:29,944 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 789045c0ac6d08809a2f0b884aa7c9dd in 63ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:29,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for 789045c0ac6d08809a2f0b884aa7c9dd: 
2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. for snaptb0-testExportExpiredSnapshot completed. 2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857] hfiles 2024-11-15T11:03:29,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,951 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b, entries=43, sequenceid=6, filesize=7.9 K 2024-11-15T11:03:29,954 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 1ee72a79fecd541294e0827d4a8f7fba in 73ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:29,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 1ee72a79fecd541294e0827d4a8f7fba: 2024-11-15T11:03:29,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. for snaptb0-testExportExpiredSnapshot completed. 2024-11-15T11:03:29,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:29,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b] hfiles 2024-11-15T11:03:29,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742154_1330 (size=110) 2024-11-15T11:03:29,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742154_1330 (size=110) 2024-11-15T11:03:29,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742154_1330 (size=110) 2024-11-15T11:03:29,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
2024-11-15T11:03:29,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-15T11:03:29,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-15T11:03:29,976 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,977 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:29,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742155_1331 (size=110) 2024-11-15T11:03:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742155_1331 (size=110) 2024-11-15T11:03:29,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd in 253 msec 2024-11-15T11:03:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742155_1331 (size=110) 2024-11-15T11:03:29,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 
2024-11-15T11:03:29,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-15T11:03:29,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-15T11:03:29,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,981 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:29,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=139 2024-11-15T11:03:29,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba in 257 msec 2024-11-15T11:03:29,984 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:29,985 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:29,985 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:29,985 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,986 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:29,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742156_1332 (size=630) 2024-11-15T11:03:29,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742156_1332 (size=630) 2024-11-15T11:03:29,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742156_1332 (size=630) 2024-11-15T11:03:30,001 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:30,005 INFO 
[PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:30,006 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:30,007 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:30,007 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-15T11:03:30,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 309 msec 2024-11-15T11:03:30,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T11:03:30,015 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T11:03:30,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:30,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-15T11:03:30,018 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:30,018 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:30,018 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-15T11:03:30,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T11:03:30,022 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:30,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742157_1333 (size=400) 2024-11-15T11:03:30,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742157_1333 (size=400) 2024-11-15T11:03:30,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742157_1333 (size=400) 2024-11-15T11:03:30,034 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 62aaee921674ca29eb5bfbb30b16733a, NAME => 'testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:30,034 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 26514d1e32bf69240f59f1d352a7ad1d, NAME => 'testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:30,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742158_1334 (size=61) 2024-11-15T11:03:30,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742158_1334 (size=61) 2024-11-15T11:03:30,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742158_1334 (size=61) 2024-11-15T11:03:30,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742159_1335 (size=61) 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:30,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742159_1335 (size=61) 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 26514d1e32bf69240f59f1d352a7ad1d, disabling compactions & flushes 2024-11-15T11:03:30,046 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. after waiting 0 ms 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,046 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742159_1335 (size=61) 2024-11-15T11:03:30,046 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 26514d1e32bf69240f59f1d352a7ad1d: Waiting for close lock at 1731668610046Disabling compacts and flushes for region at 1731668610046Disabling writes for close at 1731668610046Writing region close event to WAL at 1731668610046Closed at 1731668610046 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 62aaee921674ca29eb5bfbb30b16733a, disabling compactions & flushes 2024-11-15T11:03:30,048 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. after waiting 0 ms 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
2024-11-15T11:03:30,048 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:03:30,048 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 62aaee921674ca29eb5bfbb30b16733a: Waiting for close lock at 1731668610048Disabling compacts and flushes for region at 1731668610048Disabling writes for close at 1731668610048Writing region close event to WAL at 1731668610048Closed at 1731668610048 2024-11-15T11:03:30,049 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:30,050 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731668610049"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668610049"}]},"ts":"1731668610049"} 2024-11-15T11:03:30,050 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731668610049"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668610049"}]},"ts":"1731668610049"} 2024-11-15T11:03:30,053 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:30,053 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:30,054 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668610053"}]},"ts":"1731668610053"} 2024-11-15T11:03:30,055 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-15T11:03:30,056 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:30,057 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:30,057 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:30,057 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:30,057 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:30,057 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=26514d1e32bf69240f59f1d352a7ad1d, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=62aaee921674ca29eb5bfbb30b16733a, ASSIGN}] 2024-11-15T11:03:30,059 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=62aaee921674ca29eb5bfbb30b16733a, ASSIGN 2024-11-15T11:03:30,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=26514d1e32bf69240f59f1d352a7ad1d, ASSIGN 2024-11-15T11:03:30,059 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=62aaee921674ca29eb5bfbb30b16733a, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:03:30,059 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=26514d1e32bf69240f59f1d352a7ad1d, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:30,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T11:03:30,210 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:03:30,210 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=62aaee921674ca29eb5bfbb30b16733a, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:30,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=26514d1e32bf69240f59f1d352a7ad1d, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:30,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=26514d1e32bf69240f59f1d352a7ad1d, ASSIGN because future has completed 2024-11-15T11:03:30,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:30,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=62aaee921674ca29eb5bfbb30b16733a, ASSIGN because future has completed 2024-11-15T11:03:30,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 62aaee921674ca29eb5bfbb30b16733a, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T11:03:30,369 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,369 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => 26514d1e32bf69240f59f1d352a7ad1d, NAME => 'testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:03:30,370 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. service=AccessControlService 2024-11-15T11:03:30,370 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:30,370 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
2024-11-15T11:03:30,370 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 62aaee921674ca29eb5bfbb30b16733a, NAME => 'testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. service=AccessControlService 2024-11-15T11:03:30,371 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
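With the AccessController coprocessor loaded on each opening region (above), table ACLs such as the "jenkins: RWXCA" entry written a few lines further down can be managed through AccessControlClient. A hedged sketch, assuming a cluster with the security coprocessors enabled; it is illustrative, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (family and qualifier left null to cover all columns).
      AccessControlClient.grant(conn, TableName.valueOf("testExportExpiredSnapshot"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}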
2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,371 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,376 INFO [StoreOpener-26514d1e32bf69240f59f1d352a7ad1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,378 INFO [StoreOpener-26514d1e32bf69240f59f1d352a7ad1d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26514d1e32bf69240f59f1d352a7ad1d columnFamilyName cf 2024-11-15T11:03:30,378 DEBUG [StoreOpener-26514d1e32bf69240f59f1d352a7ad1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:30,379 INFO [StoreOpener-26514d1e32bf69240f59f1d352a7ad1d-1 {}] regionserver.HStore(327): Store=26514d1e32bf69240f59f1d352a7ad1d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:30,379 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,380 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,380 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,380 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,380 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,382 INFO [StoreOpener-62aaee921674ca29eb5bfbb30b16733a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,383 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,385 INFO [StoreOpener-62aaee921674ca29eb5bfbb30b16733a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62aaee921674ca29eb5bfbb30b16733a columnFamilyName cf 2024-11-15T11:03:30,385 DEBUG [StoreOpener-62aaee921674ca29eb5bfbb30b16733a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:30,386 INFO [StoreOpener-62aaee921674ca29eb5bfbb30b16733a-1 {}] regionserver.HStore(327): Store=62aaee921674ca29eb5bfbb30b16733a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:30,386 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,387 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,387 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,387 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:30,388 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,388 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,388 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened 26514d1e32bf69240f59f1d352a7ad1d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59557204, jitterRate=-0.11252850294113159}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:30,388 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,389 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for 26514d1e32bf69240f59f1d352a7ad1d: Running coprocessor pre-open hook at 1731668610371Writing region info on filesystem at 1731668610371Initializing all the Stores at 1731668610373 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668610373Cleaning up temporary data from old regions at 1731668610380 (+7 ms)Running coprocessor post-open hooks at 1731668610388 (+8 ms)Region opened successfully at 1731668610389 (+1 ms) 2024-11-15T11:03:30,390 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,392 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d., pid=145, masterSystemTime=1731668610365 2024-11-15T11:03:30,392 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:30,393 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 62aaee921674ca29eb5bfbb30b16733a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59576029, jitterRate=-0.11224798858165741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:30,393 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, 
pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,393 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 62aaee921674ca29eb5bfbb30b16733a: Running coprocessor pre-open hook at 1731668610372Writing region info on filesystem at 1731668610372Initializing all the Stores at 1731668610373 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668610373Cleaning up temporary data from old regions at 1731668610388 (+15 ms)Running coprocessor post-open hooks at 1731668610393 (+5 ms)Region opened successfully at 1731668610393 2024-11-15T11:03:30,394 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,394 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,394 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a., pid=146, masterSystemTime=1731668610367 2024-11-15T11:03:30,395 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=26514d1e32bf69240f59f1d352a7ad1d, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:30,396 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:03:30,397 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
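Once both regions are reported OPEN in hbase:meta (above), clients can resolve row-to-region placement; the "fetched location" lines later in the log are the async locator doing exactly that. A small illustrative sketch using the synchronous RegionLocator; the row keys are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
      // Rows sorting below '1' land in the first region, rows at or above '1' in the second.
      for (String row : new String[] { "0abc", "2xyz" }) {
        HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(row));
        System.out.println(row + " -> " + loc.getRegion().getEncodedName()
            + " on " + loc.getServerName());
      }
      System.out.println("total regions: " + locator.getAllRegionLocations().size());
    }
  }
}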
2024-11-15T11:03:30,397 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=62aaee921674ca29eb5bfbb30b16733a, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:30,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:30,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 62aaee921674ca29eb5bfbb30b16733a, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:30,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-15T11:03:30,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d, server=4cdab6d79b20,39207,1731668474152 in 185 msec 2024-11-15T11:03:30,401 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-15T11:03:30,401 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 62aaee921674ca29eb5bfbb30b16733a, server=4cdab6d79b20,45865,1731668473964 in 186 msec 2024-11-15T11:03:30,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=26514d1e32bf69240f59f1d352a7ad1d, ASSIGN in 344 msec 2024-11-15T11:03:30,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=142 2024-11-15T11:03:30,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=62aaee921674ca29eb5bfbb30b16733a, ASSIGN in 345 msec 2024-11-15T11:03:30,404 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:30,404 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668610404"}]},"ts":"1731668610404"} 2024-11-15T11:03:30,406 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-15T11:03:30,407 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:30,407 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-15T11:03:30,410 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T11:03:30,412 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:30,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:30,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:30,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:30,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 397 msec 2024-11-15T11:03:30,645 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T11:03:30,645 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-15T11:03:30,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-15T11:03:30,645 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:30,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-15T11:03:30,650 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:30,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-15T11:03:30,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:30,655 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='0896dfa5b39a8eecb9f894b852081d0e6', locateType=CURRENT is [region=testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:30,656 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='157fe01cbd9071dc18708e4da5f8e7a1b', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:30,657 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='21f667aad6bc2471b3173e9d26f1e65e3', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:30,658 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='3717f66888a058be2a21418f6ef0743f7', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:30,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:30,663 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. with WAL disabled. Data may be lost in the event of a crash. 
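The two "writing data ... with WAL disabled" warnings above are produced when a client tags its mutations with Durability.SKIP_WAL. A minimal sketch of such a write, assuming the table and column family from the log; illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL trades durability for speed; the region server then logs
      // the "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}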
2024-11-15T11:03:30,664 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:30,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-15T11:03:30,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,667 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:30,668 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:30,673 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T11:03:30,677 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-15T11:03:30,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T11:03:30,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b0a55b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:30,679 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:30,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:30,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:30,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b211e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): 
Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:30,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:30,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,680 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45266, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:30,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8edc67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:30,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:30,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:30,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:30,684 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:30,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:30,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,684 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:30,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16fb27de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:30,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:30,685 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:30,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:30,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:30,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@316c7568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:30,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:30,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,686 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:30,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a94d89d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:30,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:30,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:30,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:30,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:30,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:30,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 
2024-11-15T11:03:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:30,691 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T11:03:30,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
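The snapshot request logged above ({ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }) corresponds to a client-side Admin.snapshot() call. A hedged sketch using the plain FLUSH overload; how the 10-second TTL is attached is version-dependent (typically via snapshot properties) and is omitted here rather than guessed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH snapshots first flush each region's memstore, which is why the log
      // below shows per-region flushes before the manifest is written.
      admin.snapshot("snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"), SnapshotType.FLUSH);
    }
  }
}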
2024-11-15T11:03:30,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-15T11:03:30,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-15T11:03:30,694 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:30,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T11:03:30,695 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:30,696 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742160_1336 (size=152) 2024-11-15T11:03:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742160_1336 (size=152) 2024-11-15T11:03:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742160_1336 (size=152) 2024-11-15T11:03:30,703 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:30,703 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 62aaee921674ca29eb5bfbb30b16733a}] 2024-11-15T11:03:30,704 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,704 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T11:03:30,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-15T11:03:30,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-15T11:03:30,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:03:30,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:03:30,856 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing 62aaee921674ca29eb5bfbb30b16733a 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-15T11:03:30,856 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing 26514d1e32bf69240f59f1d352a7ad1d 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-15T11:03:30,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/.tmp/cf/876f37f1038244f7939155e2d9d35032 is 71, key is 0386768441069d1acbcd63e4b2194289/cf:q/1731668610661/Put/seqid=0 2024-11-15T11:03:30,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/.tmp/cf/2951be8b17944a6982224fd214b5332a is 71, key is 11d66e77b722a74a79889c07af0e269a/cf:q/1731668610663/Put/seqid=0 2024-11-15T11:03:30,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742161_1337 (size=5566) 2024-11-15T11:03:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742161_1337 (size=5566) 2024-11-15T11:03:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742161_1337 (size=5566) 2024-11-15T11:03:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742162_1338 (size=8052) 2024-11-15T11:03:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742162_1338 (size=8052) 2024-11-15T11:03:30,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38361 is added to blk_1073742162_1338 (size=8052) 2024-11-15T11:03:30,879 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/.tmp/cf/876f37f1038244f7939155e2d9d35032 2024-11-15T11:03:30,879 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/.tmp/cf/2951be8b17944a6982224fd214b5332a 2024-11-15T11:03:30,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/.tmp/cf/876f37f1038244f7939155e2d9d35032 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/cf/876f37f1038244f7939155e2d9d35032 2024-11-15T11:03:30,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/.tmp/cf/2951be8b17944a6982224fd214b5332a as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/cf/2951be8b17944a6982224fd214b5332a 2024-11-15T11:03:30,889 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/cf/2951be8b17944a6982224fd214b5332a, entries=43, sequenceid=5, filesize=7.9 K 2024-11-15T11:03:30,890 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/cf/876f37f1038244f7939155e2d9d35032, entries=7, sequenceid=5, filesize=5.4 K 2024-11-15T11:03:30,891 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 26514d1e32bf69240f59f1d352a7ad1d in 35ms, sequenceid=5, compaction requested=false 2024-11-15T11:03:30,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-15T11:03:30,892 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 62aaee921674ca29eb5bfbb30b16733a in 36ms, sequenceid=5, compaction requested=false 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for 26514d1e32bf69240f59f1d352a7ad1d: 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for 62aaee921674ca29eb5bfbb30b16733a: 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. for snapshot-testExportExpiredSnapshot completed. 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. for snapshot-testExportExpiredSnapshot completed. 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/cf/2951be8b17944a6982224fd214b5332a] hfiles 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/cf/2951be8b17944a6982224fd214b5332a for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/cf/876f37f1038244f7939155e2d9d35032] hfiles 2024-11-15T11:03:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/cf/876f37f1038244f7939155e2d9d35032 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742163_1339 (size=103) 2024-11-15T11:03:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742163_1339 (size=103) 2024-11-15T11:03:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742163_1339 (size=103) 2024-11-15T11:03:30,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
2024-11-15T11:03:30,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-15T11:03:30,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-15T11:03:30,909 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,910 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:03:30,912 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 62aaee921674ca29eb5bfbb30b16733a in 207 msec 2024-11-15T11:03:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742164_1340 (size=103) 2024-11-15T11:03:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742164_1340 (size=103) 2024-11-15T11:03:30,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742164_1340 (size=103) 2024-11-15T11:03:30,918 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 
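After both SnapshotRegionProcedures complete (above) and the manifest is consolidated, the snapshot becomes visible to clients. One illustrative way to confirm it from the client side:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Prints name, table and creation time for each completed snapshot,
      // e.g. the "snapshot-testExportExpiredSnapshot" created above.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " table=" + sd.getTableName()
            + " created=" + sd.getCreationTime());
      }
    }
  }
}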
2024-11-15T11:03:30,918 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-15T11:03:30,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-15T11:03:30,919 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,919 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:03:30,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=148, resume processing ppid=147 2024-11-15T11:03:30,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 26514d1e32bf69240f59f1d352a7ad1d in 217 msec 2024-11-15T11:03:30,922 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:30,922 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:30,923 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:30,923 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,924 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742165_1341 (size=609) 2024-11-15T11:03:30,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742165_1341 (size=609) 2024-11-15T11:03:30,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742165_1341 (size=609) 2024-11-15T11:03:30,940 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:30,944 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:30,945 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-15T11:03:30,946 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:30,946 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-15T11:03:30,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 254 msec 2024-11-15T11:03:31,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T11:03:31,015 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-15T11:03:31,855 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0006_000001 (auth:SIMPLE) from 127.0.0.1:48234 2024-11-15T11:03:31,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000001/launch_container.sh] 2024-11-15T11:03:31,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000001/container_tokens] 2024-11-15T11:03:31,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_2/usercache/jenkins/appcache/application_1731668480794_0006/container_1731668480794_0006_01_000001/sysfs] 2024-11-15T11:03:33,400 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:03:33,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-15T11:03:33,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-15T11:03:33,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-15T11:03:33,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-15T11:03:33,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T11:03:33,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T11:03:39,090 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:03:41,024 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668621024 2024-11-15T11:03:41,024 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668621024, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668621024, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:41,066 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:41,067 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668621024, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668621024/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-15T11:03:41,082 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:03:41,083 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. 
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
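The SnapshotTTLExpiredException above is the expected outcome of testExportExpiredSnapshot: the snapshot completed at 11:03:30,947 with ttl=10 (seconds), and ExportSnapshot's pre-copy verification ("Verify the source snapshot's expiration status and integrity") ran at 11:03:41,082, roughly 10.1 seconds later. The short Java sketch below only illustrates that TTL arithmetic; the epoch values are inferred from the log's own timestamps (e.g. the export directory "export-1731668621024" was created at 11:03:41,024), and this is not a copy of HBase's internal check.

    // Illustrative only: TTL-expiry arithmetic consistent with the timestamps above.
    // Assumption: the epoch values are derived from the log, not taken from HBase source.
    public class SnapshotTtlSketch {
      public static void main(String[] args) {
        long snapshotCompletedMs = 1731668610947L; // SnapshotProcedure pid=147 finished (11:03:30,947)
        long ttlSeconds = 10L;                     // "ttl=10" in the snapshot description
        long exportCheckMs = 1731668621082L;       // ExportSnapshot verification started (11:03:41,082)
        boolean expired = ttlSeconds > 0 && snapshotCompletedMs + ttlSeconds * 1000L < exportCheckMs;
        System.out.println("expired = " + expired); // prints "expired = true", so the export is aborted
      }
    }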
2024-11-15T11:03:41,085 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T11:03:41,089 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668621089"}]},"ts":"1731668621089"} 2024-11-15T11:03:41,092 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-15T11:03:41,092 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-15T11:03:41,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-15T11:03:41,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, UNASSIGN}] 2024-11-15T11:03:41,098 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, UNASSIGN 2024-11-15T11:03:41,098 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, UNASSIGN 2024-11-15T11:03:41,100 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=789045c0ac6d08809a2f0b884aa7c9dd, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:41,100 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=1ee72a79fecd541294e0827d4a8f7fba, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:41,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, UNASSIGN because future has completed 2024-11-15T11:03:41,103 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:41,103 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:41,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, UNASSIGN because future has completed 2024-11-15T11:03:41,104 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:41,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:41,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T11:03:41,257 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:41,258 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:41,258 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 789045c0ac6d08809a2f0b884aa7c9dd, disabling compactions & flushes 2024-11-15T11:03:41,258 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:41,258 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 2024-11-15T11:03:41,258 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. after waiting 0 ms 2024-11-15T11:03:41,258 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
2024-11-15T11:03:41,259 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:41,259 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:41,259 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 1ee72a79fecd541294e0827d4a8f7fba, disabling compactions & flushes 2024-11-15T11:03:41,259 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:41,259 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:41,259 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. after waiting 0 ms 2024-11-15T11:03:41,259 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 2024-11-15T11:03:41,268 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:41,269 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:41,269 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd. 
2024-11-15T11:03:41,269 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 789045c0ac6d08809a2f0b884aa7c9dd: Waiting for close lock at 1731668621258Running coprocessor pre-close hooks at 1731668621258Disabling compacts and flushes for region at 1731668621258Disabling writes for close at 1731668621258Writing region close event to WAL at 1731668621260 (+2 ms)Running coprocessor post-close hooks at 1731668621269 (+9 ms)Closed at 1731668621269 2024-11-15T11:03:41,271 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:41,272 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=789045c0ac6d08809a2f0b884aa7c9dd, regionState=CLOSED 2024-11-15T11:03:41,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:41,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=152 2024-11-15T11:03:41,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure 789045c0ac6d08809a2f0b884aa7c9dd, server=4cdab6d79b20,45865,1731668473964 in 172 msec 2024-11-15T11:03:41,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=789045c0ac6d08809a2f0b884aa7c9dd, UNASSIGN in 181 msec 2024-11-15T11:03:41,279 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:41,279 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:41,279 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba. 
2024-11-15T11:03:41,279 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 1ee72a79fecd541294e0827d4a8f7fba: Waiting for close lock at 1731668621259Running coprocessor pre-close hooks at 1731668621259Disabling compacts and flushes for region at 1731668621259Disabling writes for close at 1731668621259Writing region close event to WAL at 1731668621264 (+5 ms)Running coprocessor post-close hooks at 1731668621279 (+15 ms)Closed at 1731668621279 2024-11-15T11:03:41,281 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:41,282 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=1ee72a79fecd541294e0827d4a8f7fba, regionState=CLOSED 2024-11-15T11:03:41,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:41,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=153 2024-11-15T11:03:41,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 1ee72a79fecd541294e0827d4a8f7fba, server=4cdab6d79b20,39207,1731668474152 in 180 msec 2024-11-15T11:03:41,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=151 2024-11-15T11:03:41,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1ee72a79fecd541294e0827d4a8f7fba, UNASSIGN in 191 msec 2024-11-15T11:03:41,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-15T11:03:41,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 195 msec 2024-11-15T11:03:41,293 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668621292"}]},"ts":"1731668621292"} 2024-11-15T11:03:41,295 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-15T11:03:41,295 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-15T11:03:41,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 211 msec 2024-11-15T11:03:41,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T11:03:41,406 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T11:03:41,407 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,409 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,410 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,413 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,414 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:41,414 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:41,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,418 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/recovered.edits] 2024-11-15T11:03:41,418 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/recovered.edits] 2024-11-15T11:03:41,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T11:03:41,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T11:03:41,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,421 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,421 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T11:03:41,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-15T11:03:41,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 
2024-11-15T11:03:41,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,426 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,427 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/cf/2a03dd65c8f24cc1b46a1ca13aaedf1b 2024-11-15T11:03:41,427 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/cf/a45c81fd9a99448d842f38e65df78857 2024-11-15T11:03:41,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,431 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd/recovered.edits/9.seqid 2024-11-15T11:03:41,431 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba/recovered.edits/9.seqid 2024-11-15T11:03:41,431 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/789045c0ac6d08809a2f0b884aa7c9dd 2024-11-15T11:03:41,431 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportExpiredSnapshot/1ee72a79fecd541294e0827d4a8f7fba 2024-11-15T11:03:41,431 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-15T11:03:41,434 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,436 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-15T11:03:41,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-15T11:03:41,440 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,440 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-15T11:03:41,440 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668621440"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:41,440 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668621440"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:41,442 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:03:41,442 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 789045c0ac6d08809a2f0b884aa7c9dd, NAME => 'testtb-testExportExpiredSnapshot,,1731668608637.789045c0ac6d08809a2f0b884aa7c9dd.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1ee72a79fecd541294e0827d4a8f7fba, NAME => 'testtb-testExportExpiredSnapshot,1,1731668608637.1ee72a79fecd541294e0827d4a8f7fba.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:03:41,443 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
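The records around this point are routine test cleanup: the client disables and deletes testtb-testExportExpiredSnapshot and then removes its three snapshots. A minimal client-side sketch of that sequence follows; the connection setup is assumed, the names are taken verbatim from the log, and this is not the test's actual code.

    // Sketch of the Admin calls that would produce the DisableTableProcedure, DeleteTableProcedure
    // and "delete name: ..." snapshot requests seen in the surrounding records.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TestTableCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          admin.disableTable(table);   // -> DisableTableProcedure (pid=150 above)
          admin.deleteTable(table);    // -> DeleteTableProcedure (pid=156)
          // Snapshot cleanup, matching the three "delete name: ..." requests logged just after this point.
          admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
          admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
          admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
        }
      }
    }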
2024-11-15T11:03:41,443 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668621443"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:41,444 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-15T11:03:41,445 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 38 msec 2024-11-15T11:03:41,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-15T11:03:41,526 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-15T11:03:41,526 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T11:03:41,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-15T11:03:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-15T11:03:41,537 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-15T11:03:41,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-15T11:03:41,540 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-15T11:03:41,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-15T11:03:41,564 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=795 (was 801), OpenFileDescriptor=785 (was 808), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=400 (was 464), ProcessCount=13 (was 22), AvailableMemoryMB=1488 (was 497) - AvailableMemoryMB LEAK? 
- 2024-11-15T11:03:41,564 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-15T11:03:41,583 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=795, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=400, ProcessCount=13, AvailableMemoryMB=1497 2024-11-15T11:03:41,583 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-15T11:03:41,585 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:41,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:41,587 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:41,587 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-15T11:03:41,587 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T11:03:41,594 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:41,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742166_1342 (size=412) 2024-11-15T11:03:41,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742166_1342 (size=412) 2024-11-15T11:03:41,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742166_1342 (size=412) 2024-11-15T11:03:41,604 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f10fd8868fb0d2804f72a2dcc431622d, NAME => 'testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:41,604 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 91e3a313b779eaabccd228a0d0b354ab, NAME => 'testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742168_1344 (size=73) 2024-11-15T11:03:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742168_1344 (size=73) 2024-11-15T11:03:41,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742168_1344 (size=73) 2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 91e3a313b779eaabccd228a0d0b354ab, disabling compactions & flushes 2024-11-15T11:03:41,613 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. after waiting 0 ms 2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,613 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 
2024-11-15T11:03:41,613 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 91e3a313b779eaabccd228a0d0b354ab: Waiting for close lock at 1731668621613Disabling compacts and flushes for region at 1731668621613Disabling writes for close at 1731668621613Writing region close event to WAL at 1731668621613Closed at 1731668621613 2024-11-15T11:03:41,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742167_1343 (size=73) 2024-11-15T11:03:41,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742167_1343 (size=73) 2024-11-15T11:03:41,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742167_1343 (size=73) 2024-11-15T11:03:41,618 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:41,619 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing f10fd8868fb0d2804f72a2dcc431622d, disabling compactions & flushes 2024-11-15T11:03:41,619 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:41,619 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:41,619 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. after waiting 0 ms 2024-11-15T11:03:41,619 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:41,619 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 
2024-11-15T11:03:41,619 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for f10fd8868fb0d2804f72a2dcc431622d: Waiting for close lock at 1731668621619Disabling compacts and flushes for region at 1731668621619Disabling writes for close at 1731668621619Writing region close event to WAL at 1731668621619Closed at 1731668621619 2024-11-15T11:03:41,620 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:41,620 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731668621620"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668621620"}]},"ts":"1731668621620"} 2024-11-15T11:03:41,620 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731668621620"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668621620"}]},"ts":"1731668621620"} 2024-11-15T11:03:41,622 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:41,623 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:41,623 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668621623"}]},"ts":"1731668621623"} 2024-11-15T11:03:41,625 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-15T11:03:41,625 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:41,626 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:41,626 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:41,626 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:41,626 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:41,626 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, ASSIGN}] 2024-11-15T11:03:41,627 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, ASSIGN 2024-11-15T11:03:41,627 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, ASSIGN 2024-11-15T11:03:41,628 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:41,628 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:03:41,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T11:03:41,779 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
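[Editorial sketch] The entries above trace CreateTableProcedure pid=157 adding two regions of testtb-testEmptyExportFileSystemState to hbase:meta (one with ENDKEY '1', one with STARTKEY '1') and spawning ASSIGN subprocedures 158/159. The test's own code is not shown in this log; the following is only a hedged client-side sketch, using table and column-family names copied from the log, of the kind of call that produces such a procedure:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Single column family 'cf', as seen in the store-open entries later in the log.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // Pre-split at '1' gives two regions, [ ,1) and [1, ), matching the STARTKEY/ENDKEY above.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          // Blocks until the master's CreateTableProcedure completes.
          admin.createTable(desc, splitKeys);
        }
      }
    }

The pre-split is why the log shows two RegionOpenAndInit pool threads and two TransitRegionStateProcedure children instead of one.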
2024-11-15T11:03:41,779 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=91e3a313b779eaabccd228a0d0b354ab, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:41,779 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=f10fd8868fb0d2804f72a2dcc431622d, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:41,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, ASSIGN because future has completed 2024-11-15T11:03:41,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:41,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, ASSIGN because future has completed 2024-11-15T11:03:41,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:41,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T11:03:41,936 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,936 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => 91e3a313b779eaabccd228a0d0b354ab, NAME => 'testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:03:41,936 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. service=AccessControlService 2024-11-15T11:03:41,937 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 
2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => f10fd8868fb0d2804f72a2dcc431622d, NAME => 'testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:03:41,937 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. service=AccessControlService 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,937 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,937 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,938 INFO [StoreOpener-91e3a313b779eaabccd228a0d0b354ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family cf of region 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,938 INFO [StoreOpener-f10fd8868fb0d2804f72a2dcc431622d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,940 INFO [StoreOpener-f10fd8868fb0d2804f72a2dcc431622d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f10fd8868fb0d2804f72a2dcc431622d columnFamilyName cf 2024-11-15T11:03:41,940 INFO [StoreOpener-91e3a313b779eaabccd228a0d0b354ab-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91e3a313b779eaabccd228a0d0b354ab columnFamilyName cf 2024-11-15T11:03:41,940 DEBUG [StoreOpener-f10fd8868fb0d2804f72a2dcc431622d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:41,940 DEBUG [StoreOpener-91e3a313b779eaabccd228a0d0b354ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:41,940 INFO [StoreOpener-f10fd8868fb0d2804f72a2dcc431622d-1 {}] regionserver.HStore(327): Store=f10fd8868fb0d2804f72a2dcc431622d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:41,940 INFO [StoreOpener-91e3a313b779eaabccd228a0d0b354ab-1 {}] regionserver.HStore(327): Store=91e3a313b779eaabccd228a0d0b354ab/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:41,940 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,940 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,941 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 
{event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,941 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,941 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,941 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,942 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,942 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,942 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,942 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,943 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,943 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,945 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:41,945 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:41,946 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened 91e3a313b779eaabccd228a0d0b354ab; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66100422, jitterRate=-0.015026956796646118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:41,946 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened f10fd8868fb0d2804f72a2dcc431622d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62939510, jitterRate=-0.0621282160282135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:41,946 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:41,946 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:41,947 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for f10fd8868fb0d2804f72a2dcc431622d: Running coprocessor pre-open hook at 1731668621937Writing region info on filesystem at 1731668621937Initializing all the Stores at 1731668621938 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668621938Cleaning up temporary data from old regions at 1731668621942 (+4 ms)Running coprocessor post-open hooks at 1731668621946 (+4 ms)Region opened successfully at 1731668621947 (+1 ms) 2024-11-15T11:03:41,947 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for 91e3a313b779eaabccd228a0d0b354ab: Running coprocessor pre-open hook at 1731668621937Writing region info on filesystem at 1731668621937Initializing all the Stores at 1731668621938 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668621938Cleaning up temporary data from old regions at 1731668621942 (+4 ms)Running coprocessor post-open hooks at 1731668621946 (+4 ms)Region opened successfully at 1731668621947 (+1 ms) 2024-11-15T11:03:41,947 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab., pid=160, masterSystemTime=1731668621933 2024-11-15T11:03:41,948 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., pid=161, masterSystemTime=1731668621934 2024-11-15T11:03:41,950 DEBUG 
[RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,950 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:41,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:03:41,950 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=91e3a313b779eaabccd228a0d0b354ab, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:41,950 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:41,950 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:41,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=f10fd8868fb0d2804f72a2dcc431622d, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:41,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:41,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:41,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=158 2024-11-15T11:03:41,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152 in 172 msec 2024-11-15T11:03:41,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=159 2024-11-15T11:03:41,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964 in 172 msec 2024-11-15T11:03:41,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, ASSIGN in 329 msec 2024-11-15T11:03:41,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-15T11:03:41,958 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, ASSIGN in 330 msec 2024-11-15T11:03:41,959 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:41,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668621959"}]},"ts":"1731668621959"} 2024-11-15T11:03:41,960 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-15T11:03:41,961 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:41,961 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-15T11:03:41,964 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:03:41,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:41,969 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:41,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 384 msec 2024-11-15T11:03:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T11:03:42,216 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T11:03:42,216 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-15T11:03:42,216 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:42,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-15T11:03:42,220 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:42,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-15T11:03:42,221 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:03:42,223 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:03:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668622223 (current time:1731668622223). 
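[Editorial sketch] The "Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms" entries above come from the test's HBaseTestingUtil helper. Purely as a hedged illustration (not the test's own code), an equivalent check from a plain client could go through RegionLocator:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionAssignmentCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Fetches every region's current location for the table.
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          boolean allAssigned = locations.stream()
              .allMatch(l -> l != null && l.getServerName() != null);
          System.out.println("regions=" + locations.size() + ", allAssigned=" + allAssigned);
        }
      }
    }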
2024-11-15T11:03:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-15T11:03:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10b5e207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:42,227 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:42,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:42,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:42,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be1d410, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:42,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:42,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,228 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:42,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a3b4532, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:42,230 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:42,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:42,231 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:42,233 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 2024-11-15T11:03:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,233 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e492559, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:42,234 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:42,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:42,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:42,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53767939, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:42,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:42,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,236 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56100, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:42,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@780993ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:42,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:42,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:42,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:03:42,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:42,242 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465. 2024-11-15T11:03:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,243 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:03:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:03:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:03:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-15T11:03:42,245 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T11:03:42,246 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:42,250 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:42,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742169_1345 (size=185) 2024-11-15T11:03:42,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742169_1345 (size=185) 2024-11-15T11:03:42,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742169_1345 (size=185) 2024-11-15T11:03:42,267 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:42,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d}] 
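[Editorial sketch] At this point the master has stored SnapshotProcedure pid=162 for a FLUSH snapshot named emptySnaptb0-testEmptyExportFileSystemState and fanned out one SnapshotRegionProcedure per region (pids 163 and 164). Snapshot and table names below are copied from the log; the rest is a hedged sketch of the client call that initiates this flow, not the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Synchronous call: returns once the master's SnapshotProcedure reaches SUCCESS.
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", tn, SnapshotType.FLUSH);
          // The completed snapshot then appears in the snapshot listing.
          admin.listSnapshots().forEach(s ->
              System.out.println(s.getName() + " table=" + s.getTableName()));
        }
      }
    }

Because no data has been written yet, the per-region SnapshotRegionProcedures find nothing to flush, which matches the "Adding snapshot references for [] hfiles" entries that follow.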
2024-11-15T11:03:42,268 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,268 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T11:03:42,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-15T11:03:42,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-15T11:03:42,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:42,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for 91e3a313b779eaabccd228a0d0b354ab: 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for f10fd8868fb0d2804f72a2dcc431622d: 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:42,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:42,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742170_1346 (size=76) 2024-11-15T11:03:42,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742171_1347 (size=76) 2024-11-15T11:03:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742170_1346 (size=76) 2024-11-15T11:03:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742171_1347 (size=76) 2024-11-15T11:03:42,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742170_1346 (size=76) 2024-11-15T11:03:42,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742171_1347 (size=76) 2024-11-15T11:03:42,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:42,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-15T11:03:42,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 
2024-11-15T11:03:42,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-15T11:03:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-15T11:03:42,434 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,434 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-15T11:03:42,435 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,435 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab in 168 msec 2024-11-15T11:03:42,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=162 2024-11-15T11:03:42,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d in 169 msec 2024-11-15T11:03:42,438 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:42,438 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:42,439 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:42,439 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,440 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,447 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742172_1348 (size=567) 2024-11-15T11:03:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742172_1348 (size=567) 2024-11-15T11:03:42,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742172_1348 (size=567) 2024-11-15T11:03:42,450 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:42,454 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:42,455 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,458 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:42,458 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-15T11:03:42,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 215 msec 2024-11-15T11:03:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T11:03:42,566 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T11:03:42,569 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='06cb555d050184902bf2840154ea220fd', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:42,571 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', 
row='13dd3c31399f81751f7a767422a39aab5', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:42,572 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='24d31128ffa6e555303bd20a6b638080f', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:42,573 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='363ab6387af23b575defbf4a94f0f2b05', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:42,573 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='4ed204b69f9c366420cdb9b8dae9f451f', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:42,574 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='542c6101150c27fa0aad65d98d7f1a9cd', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:03:42,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:42,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:42,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:03:42,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-15T11:03:42,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 
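The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above are emitted while the test loads rows with write-ahead logging turned off. As an illustrative sketch only (not the test harness code; the row key, column family, qualifier and value below are placeholders), a client triggers the same warning by tagging each Put with SKIP_WAL durability:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for write speed; the region server logs the
      // "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}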
2024-11-15T11:03:42,581 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:42,582 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:03:42,586 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:03:42,591 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T11:03:42,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:03:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668622593 (current time:1731668622593). 2024-11-15T11:03:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-15T11:03:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@648dd963, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:42,595 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:42,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:42,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:42,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43168913, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:42,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:42,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,597 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:42,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433ffdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:42,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:42,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:42,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:42,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
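The snapshot request logged at 11:03:42,593 ({ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) corresponds, on the client side, to a synchronous Admin snapshot call. A minimal sketch under default configuration, reusing the snapshot and table names from the log (this is not the test's own driver code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Takes a FLUSH-type snapshot of the online table; the call returns once
      // the master-side SnapshotProcedure (pid=165 in this run) has completed.
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}

While that call is outstanding, the client repeatedly asks the master whether the procedure has finished, which is what produces the recurring "Checking to see if procedure is done pid=..." entries in this log.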
2024-11-15T11:03:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,601 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@327df10b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:42,602 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:42,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:42,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:42,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@646c653a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:42,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:42,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,604 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:42,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33206784, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:42,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:42,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:42,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:42,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:42,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 
2024-11-15T11:03:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:42,609 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T11:03:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
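Before storing the procedure, the SnapshotManager above first confirms that no snapshot with the same name already exists ("No existing snapshot, attempting snapshot..."). From the client side, a quick way to see which snapshots have completed is to list them; a hedged sketch (the snapshot names mirror this run, the rest is purely illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Both emptySnaptb0-testEmptyExportFileSystemState and
      // snaptb0-testEmptyExportFileSystemState should appear here once their
      // SnapshotProcedures (pid=162 and pid=165) have finished.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}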
2024-11-15T11:03:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T11:03:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-15T11:03:42,612 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T11:03:42,613 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:42,615 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:42,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742173_1349 (size=180) 2024-11-15T11:03:42,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742173_1349 (size=180) 2024-11-15T11:03:42,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742173_1349 (size=180) 2024-11-15T11:03:42,621 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:42,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d}] 2024-11-15T11:03:42,622 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,622 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T11:03:42,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-15T11:03:42,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-15T11:03:42,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:42,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:42,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing 91e3a313b779eaabccd228a0d0b354ab 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-15T11:03:42,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing f10fd8868fb0d2804f72a2dcc431622d 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-15T11:03:42,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/.tmp/cf/f4534403a7074de2af1a53efa967820f is 71, key is 032c5391b128cbc2539de2473d12a7c1/cf:q/1731668622577/Put/seqid=0 2024-11-15T11:03:42,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/.tmp/cf/69a36f1f3a674cb78b164ff838bacc80 is 71, key is 1072fc074aebd902c9ee2fc38b53ef60/cf:q/1731668622578/Put/seqid=0 2024-11-15T11:03:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742174_1350 (size=5566) 2024-11-15T11:03:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742174_1350 (size=5566) 2024-11-15T11:03:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742175_1351 (size=8052) 2024-11-15T11:03:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742175_1351 (size=8052) 2024-11-15T11:03:42,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742174_1350 
(size=5566) 2024-11-15T11:03:42,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742175_1351 (size=8052) 2024-11-15T11:03:42,797 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/.tmp/cf/f4534403a7074de2af1a53efa967820f 2024-11-15T11:03:42,797 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/.tmp/cf/69a36f1f3a674cb78b164ff838bacc80 2024-11-15T11:03:42,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/.tmp/cf/69a36f1f3a674cb78b164ff838bacc80 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80 2024-11-15T11:03:42,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/.tmp/cf/f4534403a7074de2af1a53efa967820f as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f 2024-11-15T11:03:42,808 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80, entries=43, sequenceid=6, filesize=7.9 K 2024-11-15T11:03:42,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f, entries=7, sequenceid=6, filesize=5.4 K 2024-11-15T11:03:42,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for f10fd8868fb0d2804f72a2dcc431622d in 34ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:42,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-15T11:03:42,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for f10fd8868fb0d2804f72a2dcc431622d: 2024-11-15T11:03:42,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-15T11:03:42,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 91e3a313b779eaabccd228a0d0b354ab in 35ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:42,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for 91e3a313b779eaabccd228a0d0b354ab: 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80] hfiles 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f] hfiles 2024-11-15T11:03:42,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742176_1352 (size=115) 2024-11-15T11:03:42,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742177_1353 (size=115) 2024-11-15T11:03:42,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742176_1352 (size=115) 2024-11-15T11:03:42,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742176_1352 (size=115) 2024-11-15T11:03:42,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742177_1353 (size=115) 2024-11-15T11:03:42,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 
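The flushes above (dataSize=467 B and ~2.80 KB written to new cf/ HFiles) are performed by SnapshotRegionCallable because the snapshot is of type FLUSH, so the rows written earlier with WAL disabled are persisted to HFiles before the manifest references them. The same memstore-to-HFile flush can also be requested directly from a client; a small illustrative sketch, not taken from the test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Forces every region of the table to write its memstore out as HFiles,
      // the same per-region step the snapshot procedure performs above.
      admin.flush(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}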
2024-11-15T11:03:42,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-15T11:03:42,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742177_1353 (size=115) 2024-11-15T11:03:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-15T11:03:42,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,818 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:42,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f10fd8868fb0d2804f72a2dcc431622d in 197 msec 2024-11-15T11:03:42,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:42,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-15T11:03:42,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-15T11:03:42,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:42,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-15T11:03:42,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 91e3a313b779eaabccd228a0d0b354ab in 205 msec 2024-11-15T11:03:42,827 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:42,828 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:42,829 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:42,829 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,829 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742178_1354 (size=645) 2024-11-15T11:03:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742178_1354 (size=645) 2024-11-15T11:03:42,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742178_1354 (size=645) 2024-11-15T11:03:42,844 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:42,848 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:42,848 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,850 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:42,850 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-15T11:03:42,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 240 msec 2024-11-15T11:03:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T11:03:42,926 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, 
Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T11:03:42,926 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926 2024-11-15T11:03:42,926 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:42,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:42,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,956 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:03:42,959 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:42,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742179_1355 (size=185) 2024-11-15T11:03:42,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742179_1355 (size=185) 2024-11-15T11:03:42,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742179_1355 (size=185) 2024-11-15T11:03:42,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742180_1356 (size=567) 2024-11-15T11:03:42,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742180_1356 (size=567) 2024-11-15T11:03:42,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742180_1356 (size=567) 2024-11-15T11:03:42,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:42,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:42,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-15T11:03:43,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-15T11:03:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-15T11:03:44,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-10760124923334855393.jar 2024-11-15T11:03:44,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-5608183656744342644.jar 2024-11-15T11:03:44,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,747 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:44,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:03:44,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:03:44,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:03:44,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:03:44,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:03:44,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:03:44,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:44,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:44,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:44,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:44,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:44,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:44,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:44,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742181_1357 (size=131440) 2024-11-15T11:03:44,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742181_1357 (size=131440) 2024-11-15T11:03:44,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742181_1357 (size=131440) 2024-11-15T11:03:44,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T11:03:44,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T11:03:44,820 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T11:03:44,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T11:03:44,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T11:03:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T11:03:44,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742184_1360 (size=903737) 2024-11-15T11:03:44,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742184_1360 (size=903737) 2024-11-15T11:03:44,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742184_1360 (size=903737) 2024-11-15T11:03:44,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T11:03:44,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T11:03:44,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T11:03:44,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742186_1362 (size=1877034) 2024-11-15T11:03:44,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742186_1362 (size=1877034) 2024-11-15T11:03:44,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742186_1362 (size=1877034) 2024-11-15T11:03:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742187_1363 (size=77835) 2024-11-15T11:03:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742187_1363 (size=77835) 2024-11-15T11:03:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742187_1363 (size=77835) 2024-11-15T11:03:44,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742188_1364 (size=30949) 2024-11-15T11:03:44,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742188_1364 (size=30949) 2024-11-15T11:03:44,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742188_1364 (size=30949) 2024-11-15T11:03:44,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T11:03:44,908 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T11:03:44,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T11:03:44,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742190_1366 (size=6424745) 2024-11-15T11:03:44,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742190_1366 (size=6424745) 2024-11-15T11:03:44,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742190_1366 (size=6424745) 2024-11-15T11:03:44,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742191_1367 (size=4695811) 2024-11-15T11:03:44,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742191_1367 (size=4695811) 2024-11-15T11:03:44,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742191_1367 (size=4695811) 2024-11-15T11:03:44,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742192_1368 (size=440656) 2024-11-15T11:03:44,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742192_1368 (size=440656) 2024-11-15T11:03:44,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742192_1368 (size=440656) 2024-11-15T11:03:44,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742193_1369 (size=232957) 2024-11-15T11:03:44,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742193_1369 (size=232957) 2024-11-15T11:03:44,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742193_1369 (size=232957) 2024-11-15T11:03:44,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742194_1370 (size=127628) 2024-11-15T11:03:44,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742194_1370 (size=127628) 2024-11-15T11:03:44,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742194_1370 (size=127628) 2024-11-15T11:03:44,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742195_1371 (size=20406) 2024-11-15T11:03:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742195_1371 (size=20406) 2024-11-15T11:03:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742195_1371 (size=20406) 2024-11-15T11:03:45,007 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742196_1372 (size=5175431) 2024-11-15T11:03:45,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742196_1372 (size=5175431) 2024-11-15T11:03:45,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742196_1372 (size=5175431) 2024-11-15T11:03:45,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742197_1373 (size=217634) 2024-11-15T11:03:45,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742197_1373 (size=217634) 2024-11-15T11:03:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742197_1373 (size=217634) 2024-11-15T11:03:45,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T11:03:45,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T11:03:45,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T11:03:45,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742199_1375 (size=322274) 2024-11-15T11:03:45,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742199_1375 (size=322274) 2024-11-15T11:03:45,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742199_1375 (size=322274) 2024-11-15T11:03:45,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742200_1376 (size=503880) 2024-11-15T11:03:45,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742200_1376 (size=503880) 2024-11-15T11:03:45,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742200_1376 (size=503880) 2024-11-15T11:03:45,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742201_1377 (size=29229) 2024-11-15T11:03:45,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742201_1377 (size=29229) 2024-11-15T11:03:45,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742201_1377 (size=29229) 2024-11-15T11:03:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742202_1378 (size=24096) 2024-11-15T11:03:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742202_1378 (size=24096) 
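Note: the addStoredBlock entries above register every block on all three datanodes (127.0.0.1:38361, :42399, :39395), i.e. the mini DFS cluster is writing with replication factor 3. Purely as an illustration of the client-side call that produces this pattern (not the test's own code; the path, buffer size and block size below are made up), a replicated write looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicatedWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative only: the test talks to its MiniDFSCluster at hdfs://localhost:37487.
    conf.set("fs.defaultFS", "hdfs://localhost:37487");
    try (FileSystem fs = FileSystem.get(conf)) {
      Path target = new Path("/user/jenkins/example/libjar-copy.jar"); // hypothetical path
      short replication = 3; // one addStoredBlock log line per replica per block
      long blockSize = 128L * 1024 * 1024;
      try (FSDataOutputStream out = fs.create(target, true, 4096, replication, blockSize)) {
        out.write(new byte[] {0, 1, 2, 3});
      }
    }
  }
}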
2024-11-15T11:03:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742202_1378 (size=24096) 2024-11-15T11:03:45,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742203_1379 (size=111872) 2024-11-15T11:03:45,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742203_1379 (size=111872) 2024-11-15T11:03:45,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742203_1379 (size=111872) 2024-11-15T11:03:45,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742204_1380 (size=45609) 2024-11-15T11:03:45,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742204_1380 (size=45609) 2024-11-15T11:03:45,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742204_1380 (size=45609) 2024-11-15T11:03:45,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742205_1381 (size=136454) 2024-11-15T11:03:45,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742205_1381 (size=136454) 2024-11-15T11:03:45,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742205_1381 (size=136454) 2024-11-15T11:03:45,089 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
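Note: the TableMapReduceUtil(972) DEBUG lines earlier record, for each job-side class, which local-repository jar will be shipped with the MapReduce job, and the JobResourceUploader warning just above notes that no job jar was set. As a hedged sketch of the client-side wiring that produces those lines (not the test code itself; the job name and class are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static Job buildJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-sketch"); // hypothetical job name
    // Setting the jar by class avoids the "No job jar file set" warning seen above.
    job.setJarByClass(DependencyJarSketch.class);
    // Resolves the containing jar for each required class (LongWritable, Text, the
    // input/output formats, the OpenTelemetry classes, ...) and adds it to the job's
    // distributed-cache jars, which is what emits the "For class X, using jar Y" lines.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}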
2024-11-15T11:03:45,091 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-15T11:03:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742206_1382 (size=7) 2024-11-15T11:03:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742206_1382 (size=7) 2024-11-15T11:03:45,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742206_1382 (size=7) 2024-11-15T11:03:45,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742207_1383 (size=10) 2024-11-15T11:03:45,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742207_1383 (size=10) 2024-11-15T11:03:45,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742207_1383 (size=10) 2024-11-15T11:03:45,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742208_1384 (size=303637) 2024-11-15T11:03:45,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742208_1384 (size=303637) 2024-11-15T11:03:45,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742208_1384 (size=303637) 2024-11-15T11:03:45,136 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:03:45,136 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:03:45,754 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0007_000001 (auth:SIMPLE) from 127.0.0.1:58892 2024-11-15T11:03:46,771 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:03:51,512 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0007_000001 (auth:SIMPLE) from 127.0.0.1:53892 2024-11-15T11:03:51,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742209_1385 (size=349263) 2024-11-15T11:03:51,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742209_1385 (size=349263) 2024-11-15T11:03:51,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742209_1385 (size=349263) 2024-11-15T11:03:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742210_1386 (size=8568) 2024-11-15T11:03:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742210_1386 (size=8568) 2024-11-15T11:03:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742210_1386 (size=8568) 2024-11-15T11:03:52,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742211_1387 (size=460) 2024-11-15T11:03:52,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742211_1387 (size=460) 2024-11-15T11:03:52,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742211_1387 (size=460) 2024-11-15T11:03:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742212_1388 (size=8568) 2024-11-15T11:03:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742212_1388 (size=8568) 2024-11-15T11:03:52,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742212_1388 (size=8568) 2024-11-15T11:03:52,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742213_1389 (size=349263) 2024-11-15T11:03:52,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742213_1389 (size=349263) 2024-11-15T11:03:52,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742213_1389 (size=349263) 2024-11-15T11:03:54,253 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:03:54,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
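Note: the ExportSnapshot entries here (loading the 'emptySnaptb0-testEmptyExportFileSystemState' hfile list, finalizing the export, verifying the exported snapshot) are the standard phases of the snapshot export tool. A minimal, hedged way to drive the same tool outside the test harness would be roughly the sketch below; the destination URI is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // --snapshot and --copy-to are the usual ExportSnapshot arguments; the
    // destination path below is hypothetical.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "--copy-to", "hdfs://localhost:37487/user/jenkins/export-sketch"
    });
    System.exit(rc);
  }
}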
2024-11-15T11:03:54,260 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:54,260 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:03:54,261 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:03:54,261 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:54,261 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-15T11:03:54,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-15T11:03:54,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:54,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-15T11:03:54,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668622926/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-15T11:03:54,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T11:03:54,272 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668634272"}]},"ts":"1731668634272"} 2024-11-15T11:03:54,274 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-15T11:03:54,274 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-15T11:03:54,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-15T11:03:54,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, UNASSIGN}] 2024-11-15T11:03:54,277 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, UNASSIGN 2024-11-15T11:03:54,278 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, UNASSIGN 2024-11-15T11:03:54,278 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=f10fd8868fb0d2804f72a2dcc431622d, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:03:54,278 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=91e3a313b779eaabccd228a0d0b354ab, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:54,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, UNASSIGN because future has completed 2024-11-15T11:03:54,280 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:54,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:03:54,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, UNASSIGN because future has completed 2024-11-15T11:03:54,282 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:03:54,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:54,375 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T11:03:54,433 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:54,434 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing f10fd8868fb0d2804f72a2dcc431622d, disabling compactions & flushes 2024-11-15T11:03:54,434 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing 91e3a313b779eaabccd228a0d0b354ab, disabling compactions & flushes 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. after waiting 0 ms 2024-11-15T11:03:54,434 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. after waiting 0 ms 2024-11-15T11:03:54,434 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 
2024-11-15T11:03:54,439 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:54,440 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:03:54,440 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:54,441 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d. 2024-11-15T11:03:54,441 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for f10fd8868fb0d2804f72a2dcc431622d: Waiting for close lock at 1731668634434Running coprocessor pre-close hooks at 1731668634434Disabling compacts and flushes for region at 1731668634434Disabling writes for close at 1731668634434Writing region close event to WAL at 1731668634435 (+1 ms)Running coprocessor post-close hooks at 1731668634440 (+5 ms)Closed at 1731668634441 (+1 ms) 2024-11-15T11:03:54,441 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:03:54,441 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab. 
2024-11-15T11:03:54,441 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for 91e3a313b779eaabccd228a0d0b354ab: Waiting for close lock at 1731668634434Running coprocessor pre-close hooks at 1731668634434Disabling compacts and flushes for region at 1731668634434Disabling writes for close at 1731668634434Writing region close event to WAL at 1731668634435 (+1 ms)Running coprocessor post-close hooks at 1731668634441 (+6 ms)Closed at 1731668634441 2024-11-15T11:03:54,443 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=f10fd8868fb0d2804f72a2dcc431622d, regionState=CLOSED 2024-11-15T11:03:54,444 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:54,445 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed 91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:54,445 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=91e3a313b779eaabccd228a0d0b354ab, regionState=CLOSED 2024-11-15T11:03:54,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:03:54,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:54,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-15T11:03:54,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure f10fd8868fb0d2804f72a2dcc431622d, server=4cdab6d79b20,45865,1731668473964 in 166 msec 2024-11-15T11:03:54,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=f10fd8868fb0d2804f72a2dcc431622d, UNASSIGN in 172 msec 2024-11-15T11:03:54,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-15T11:03:54,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure 91e3a313b779eaabccd228a0d0b354ab, server=4cdab6d79b20,39207,1731668474152 in 166 msec 2024-11-15T11:03:54,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-15T11:03:54,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=91e3a313b779eaabccd228a0d0b354ab, UNASSIGN in 174 msec 2024-11-15T11:03:54,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-15T11:03:54,454 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 178 msec 2024-11-15T11:03:54,456 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668634455"}]},"ts":"1731668634455"} 2024-11-15T11:03:54,457 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-15T11:03:54,457 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-15T11:03:54,459 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 190 msec 2024-11-15T11:03:54,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T11:03:54,585 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T11:03:54,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,588 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,589 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,593 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,593 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:54,594 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:54,596 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf, FileablePath, 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/recovered.edits] 2024-11-15T11:03:54,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T11:03:54,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T11:03:54,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T11:03:54,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:54,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:54,602 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:54,602 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/recovered.edits] 2024-11-15T11:03:54,603 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:54,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-15T11:03:54,603 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:54,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:54,607 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/cf/69a36f1f3a674cb78b164ff838bacc80 2024-11-15T11:03:54,611 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/cf/f4534403a7074de2af1a53efa967820f 
2024-11-15T11:03:54,614 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d/recovered.edits/9.seqid 2024-11-15T11:03:54,614 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/f10fd8868fb0d2804f72a2dcc431622d 2024-11-15T11:03:54,616 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab/recovered.edits/9.seqid 2024-11-15T11:03:54,616 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testEmptyExportFileSystemState/91e3a313b779eaabccd228a0d0b354ab 2024-11-15T11:03:54,617 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-15T11:03:54,620 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,624 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-15T11:03:54,627 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-15T11:03:54,628 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,628 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
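Note: the procedures from pid=168 onward trace the server-side work behind the test's teardown: disable the table, delete it, and (just below) delete the two snapshots. From the client side the whole sequence is a handful of Admin calls; a hedged sketch, not the test's actual teardown code, would be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // Triggers the DisableTableProcedure (pid=168 above) and waits for completion.
      admin.disableTable(table);
      // Triggers the DeleteTableProcedure (pid=174): archive regions, clean hbase:meta.
      admin.deleteTable(table);
      // Matches the 'delete name: ... type: DISABLED' snapshot requests in the log.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}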
2024-11-15T11:03:54,629 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668634628"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:54,629 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668634628"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:54,632 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:03:54,633 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 91e3a313b779eaabccd228a0d0b354ab, NAME => 'testtb-testEmptyExportFileSystemState,,1731668621585.91e3a313b779eaabccd228a0d0b354ab.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f10fd8868fb0d2804f72a2dcc431622d, NAME => 'testtb-testEmptyExportFileSystemState,1,1731668621585.f10fd8868fb0d2804f72a2dcc431622d.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:03:54,633 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-11-15T11:03:54,633 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668634633"}]},"ts":"9223372036854775807"} 2024-11-15T11:03:54,636 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-15T11:03:54,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 51 msec 2024-11-15T11:03:54,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-15T11:03:54,706 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-15T11:03:54,706 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T11:03:54,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-15T11:03:54,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:54,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-15T11:03:54,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-15T11:03:54,753 INFO [Time-limited 
test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=805 (was 795) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:57716 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:38097 from appattempt_1731668480794_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1686202993_1 at /127.0.0.1:57678 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:38868 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3225) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:48410 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5416 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:44945 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=812 (was 785) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 400) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=2546 (was 1497) - AvailableMemoryMB LEAK? - 2024-11-15T11:03:54,754 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-15T11:03:54,782 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=805, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=17, AvailableMemoryMB=2542 2024-11-15T11:03:54,782 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-15T11:03:54,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:03:54,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:03:54,788 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:03:54,788 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:54,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-15T11:03:54,792 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:03:54,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T11:03:54,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742214_1390 (size=404) 2024-11-15T11:03:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742214_1390 (size=404) 2024-11-15T11:03:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742214_1390 (size=404) 2024-11-15T11:03:54,849 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 83fdacd28f9e0e342ef078be16ea2663, NAME => 'testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:54,849 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2764bea576bfa200194abca20722d84f, NAME => 'testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:54,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742215_1391 (size=65) 2024-11-15T11:03:54,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742215_1391 (size=65) 2024-11-15T11:03:54,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742215_1391 (size=65) 2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 83fdacd28f9e0e342ef078be16ea2663, disabling compactions & flushes 2024-11-15T11:03:54,873 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. after waiting 0 ms 2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:54,873 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 
2024-11-15T11:03:54,873 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 83fdacd28f9e0e342ef078be16ea2663: Waiting for close lock at 1731668634873Disabling compacts and flushes for region at 1731668634873Disabling writes for close at 1731668634873Writing region close event to WAL at 1731668634873Closed at 1731668634873 2024-11-15T11:03:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742216_1392 (size=65) 2024-11-15T11:03:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742216_1392 (size=65) 2024-11-15T11:03:54,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742216_1392 (size=65) 2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 2764bea576bfa200194abca20722d84f, disabling compactions & flushes 2024-11-15T11:03:54,883 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. after waiting 0 ms 2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:54,883 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 
2024-11-15T11:03:54,883 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2764bea576bfa200194abca20722d84f: Waiting for close lock at 1731668634883Disabling compacts and flushes for region at 1731668634883Disabling writes for close at 1731668634883Writing region close event to WAL at 1731668634883Closed at 1731668634883 2024-11-15T11:03:54,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:03:54,885 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668634884"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668634884"}]},"ts":"1731668634884"} 2024-11-15T11:03:54,885 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731668634884"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668634884"}]},"ts":"1731668634884"} 2024-11-15T11:03:54,888 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:03:54,889 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:03:54,889 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668634889"}]},"ts":"1731668634889"} 2024-11-15T11:03:54,891 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-15T11:03:54,892 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:03:54,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:03:54,893 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:03:54,893 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:03:54,894 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:03:54,894 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:03:54,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, ASSIGN}] 2024-11-15T11:03:54,897 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, ASSIGN 2024-11-15T11:03:54,898 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, ASSIGN 2024-11-15T11:03:54,903 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, ASSIGN; state=OFFLINE, location=4cdab6d79b20,36113,1731668474097; forceNewPlan=false, retain=false 2024-11-15T11:03:54,903 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:03:54,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T11:03:55,053 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:03:55,054 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=83fdacd28f9e0e342ef078be16ea2663, regionState=OPENING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:03:55,054 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=2764bea576bfa200194abca20722d84f, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, ASSIGN because future has completed 2024-11-15T11:03:55,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:03:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, ASSIGN because future has completed 2024-11-15T11:03:55,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:03:55,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T11:03:55,219 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:55,219 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 2764bea576bfa200194abca20722d84f, NAME => 'testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:03:55,219 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,219 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => 83fdacd28f9e0e342ef078be16ea2663, NAME => 'testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:03:55,219 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. service=AccessControlService 2024-11-15T11:03:55,219 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 
service=AccessControlService 2024-11-15T11:03:55,220 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,220 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,220 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,222 INFO [StoreOpener-2764bea576bfa200194abca20722d84f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,223 INFO [StoreOpener-2764bea576bfa200194abca20722d84f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2764bea576bfa200194abca20722d84f columnFamilyName cf 2024-11-15T11:03:55,224 DEBUG [StoreOpener-2764bea576bfa200194abca20722d84f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:55,224 INFO [StoreOpener-83fdacd28f9e0e342ef078be16ea2663-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,224 INFO [StoreOpener-2764bea576bfa200194abca20722d84f-1 {}] regionserver.HStore(327): Store=2764bea576bfa200194abca20722d84f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:55,224 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,225 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,226 INFO [StoreOpener-83fdacd28f9e0e342ef078be16ea2663-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83fdacd28f9e0e342ef078be16ea2663 columnFamilyName cf 2024-11-15T11:03:55,226 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,226 DEBUG [StoreOpener-83fdacd28f9e0e342ef078be16ea2663-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:03:55,226 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,226 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,226 INFO 
[StoreOpener-83fdacd28f9e0e342ef078be16ea2663-1 {}] regionserver.HStore(327): Store=83fdacd28f9e0e342ef078be16ea2663/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:03:55,226 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,227 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,228 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,228 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,228 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,229 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:55,231 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 2764bea576bfa200194abca20722d84f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59549603, jitterRate=-0.11264176666736603}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:55,231 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,232 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 2764bea576bfa200194abca20722d84f: Running coprocessor pre-open hook at 1731668635220Writing region info on filesystem at 1731668635220Initializing all the Stores at 1731668635221 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668635221Cleaning up temporary data from old regions at 1731668635226 (+5 ms)Running coprocessor post-open hooks at 1731668635231 (+5 ms)Region opened successfully at 1731668635232 (+1 ms) 2024-11-15T11:03:55,233 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f., pid=179, masterSystemTime=1731668635215 2024-11-15T11:03:55,234 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:03:55,234 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened 83fdacd28f9e0e342ef078be16ea2663; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63612431, jitterRate=-0.05210091173648834}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:03:55,235 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,235 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for 83fdacd28f9e0e342ef078be16ea2663: Running coprocessor pre-open hook at 1731668635220Writing region info on filesystem at 1731668635220Initializing all the Stores at 1731668635222 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668635222Cleaning up temporary data from old regions at 1731668635229 (+7 ms)Running coprocessor post-open hooks at 1731668635235 (+6 ms)Region opened successfully at 1731668635235 2024-11-15T11:03:55,236 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663., pid=178, masterSystemTime=1731668635215 2024-11-15T11:03:55,236 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:55,236 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 
2024-11-15T11:03:55,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=2764bea576bfa200194abca20722d84f, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:03:55,239 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=83fdacd28f9e0e342ef078be16ea2663, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:03:55,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:03:55,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:03:55,241 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,241 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,243 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-15T11:03:55,243 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152 in 178 msec 2024-11-15T11:03:55,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=176 2024-11-15T11:03:55,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, ASSIGN in 349 msec 2024-11-15T11:03:55,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097 in 180 msec 2024-11-15T11:03:55,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=175 2024-11-15T11:03:55,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, ASSIGN in 351 msec 2024-11-15T11:03:55,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:03:55,249 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668635248"}]},"ts":"1731668635248"} 2024-11-15T11:03:55,252 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-15T11:03:55,253 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:03:55,253 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-15T11:03:55,256 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-15T11:03:55,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:55,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:55,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:55,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:03:55,267 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,270 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,270 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T11:03:55,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 482 msec 2024-11-15T11:03:55,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T11:03:55,425 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T11:03:55,425 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-15T11:03:55,425 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:55,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-15T11:03:55,430 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:55,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-15T11:03:55,430 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T11:03:55,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T11:03:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668635433 (current time:1731668635433). 
2024-11-15T11:03:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-15T11:03:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17902adb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:55,436 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:55,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:55,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:55,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33263086, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:55,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:55,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,438 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:55,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49873e86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:55,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:55,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:55,441 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:55,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:03:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,442 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e045752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:55,444 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:55,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:55,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d041239, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,446 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:55,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@269b4c19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:55,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:55,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:55,449 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57994, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:03:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:55,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:03:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-11-15T11:03:55,453 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:03:55,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:03:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T11:03:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-15T11:03:55,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T11:03:55,456 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:55,457 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:55,459 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:55,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742217_1393 (size=161) 2024-11-15T11:03:55,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742217_1393 (size=161) 2024-11-15T11:03:55,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742217_1393 (size=161) 2024-11-15T11:03:55,469 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:55,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f}] 2024-11-15T11:03:55,470 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,470 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T11:03:55,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-15T11:03:55,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-15T11:03:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for 83fdacd28f9e0e342ef078be16ea2663: 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 2764bea576bfa200194abca20722d84f: 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. for emptySnaptb0-testExportWithChecksum completed. 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. for emptySnaptb0-testExportWithChecksum completed. 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:03:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742219_1395 (size=68) 2024-11-15T11:03:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742219_1395 (size=68) 2024-11-15T11:03:55,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742218_1394 (size=68) 2024-11-15T11:03:55,643 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742219_1395 (size=68) 2024-11-15T11:03:55,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742218_1394 (size=68) 2024-11-15T11:03:55,643 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-15T11:03:55,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742218_1394 (size=68) 2024-11-15T11:03:55,643 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 
2024-11-15T11:03:55,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-15T11:03:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-15T11:03:55,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,644 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-15T11:03:55,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,644 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f in 176 msec 2024-11-15T11:03:55,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-15T11:03:55,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 in 176 msec 2024-11-15T11:03:55,648 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:55,649 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:55,650 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:55,650 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-15T11:03:55,651 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-15T11:03:55,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42399 is added to blk_1073742220_1396 (size=543) 2024-11-15T11:03:55,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742220_1396 (size=543) 2024-11-15T11:03:55,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742220_1396 (size=543) 2024-11-15T11:03:55,668 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:55,673 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:55,674 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-15T11:03:55,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:55,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-15T11:03:55,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 222 msec 2024-11-15T11:03:55,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T11:03:55,775 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T11:03:55,784 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='00c62597532259f6e886a958de3510a07', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663., hostname=4cdab6d79b20,36113,1731668474097, seqNum=2] 2024-11-15T11:03:55,786 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='11b57a9c10b1f981d5bb23a94a629deb8', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 
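[Editor's note] The records above show the master driving the FLUSH-type snapshot procedure (pid=180) for emptySnaptb0-testExportWithChecksum through its states (SNAPSHOT_PREPARE ... SNAPSHOT_COMPLETE_SNAPSHOT) until "Operation: SNAPSHOT ... completed" is reported to the client. For orientation only, a minimal client-side sketch of how such a flush snapshot is typically requested through the public HBase Admin API follows; the class name and the reliance on hbase-site.xml are illustrative assumptions, and only the snapshot/table names are taken from the log. This is not the test's own source code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    // Assumption: cluster connection settings come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to snapshot the table; for an enabled table this follows the
      // flush-snapshot path whose procedure states appear in the log above.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}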
2024-11-15T11:03:55,787 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='29faa23148c855c085e9e5e678ffaeb79', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:55,788 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='350918d02112d4c30ab90dcbfb216e78c', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:55,789 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4fbe1c60cda3b111683709e39fd5f9862', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:55,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36113 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:55,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:03:55,800 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T11:03:55,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-15T11:03:55,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:55,803 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:03:55,805 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T11:03:55,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T11:03:55,815 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T11:03:55,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T11:03:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668635817 (current time:1731668635817). 
2024-11-15T11:03:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:03:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-15T11:03:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:03:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@316802e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:55,819 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2feaf043, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:55,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:55,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,821 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:55,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b38a17c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:55,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:55,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:55,824 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58006, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:03:55,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:03:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,825 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1b0df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:03:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:03:55,829 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:03:55,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:03:55,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:03:55,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3996d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:03:55,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:03:55,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,831 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:03:55,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@87d75f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:03:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:03:55,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:03:55,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:03:55,834 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:03:55,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:03:55,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:03:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:03:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:03:55,838 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:03:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-15T11:03:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:03:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T11:03:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-15T11:03:55,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T11:03:55,842 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:03:55,843 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:03:55,845 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:03:55,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742221_1397 (size=156) 2024-11-15T11:03:55,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742221_1397 (size=156) 2024-11-15T11:03:55,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742221_1397 (size=156) 2024-11-15T11:03:55,857 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:03:55,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f}] 2024-11-15T11:03:55,859 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:55,859 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T11:03:56,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36113 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-15T11:03:56,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-15T11:03:56,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:03:56,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:03:56,012 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing 83fdacd28f9e0e342ef078be16ea2663 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-15T11:03:56,012 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 2764bea576bfa200194abca20722d84f 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-15T11:03:56,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/.tmp/cf/da0ba667545d41beb17cf3f3341eecc0 is 71, key is 141cc1da3ff3398e522fdb7ee1ef68ea/cf:q/1731668635798/Put/seqid=0 2024-11-15T11:03:56,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/.tmp/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 is 71, key is 0bc2a4797c565114685223e6f94be423/cf:q/1731668635795/Put/seqid=0 2024-11-15T11:03:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742222_1398 (size=8394) 2024-11-15T11:03:56,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742222_1398 (size=8394) 2024-11-15T11:03:56,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742222_1398 (size=8394) 2024-11-15T11:03:56,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/.tmp/cf/da0ba667545d41beb17cf3f3341eecc0 2024-11-15T11:03:56,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/.tmp/cf/da0ba667545d41beb17cf3f3341eecc0 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 2024-11-15T11:03:56,096 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0, entries=48, sequenceid=6, filesize=8.2 K 2024-11-15T11:03:56,098 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 2764bea576bfa200194abca20722d84f in 86ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:56,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-15T11:03:56,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 2764bea576bfa200194abca20722d84f: 2024-11-15T11:03:56,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. for snaptb0-testExportWithChecksum completed. 2024-11-15T11:03:56,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-15T11:03:56,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:56,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0] hfiles 2024-11-15T11:03:56,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 for snapshot=snaptb0-testExportWithChecksum 2024-11-15T11:03:56,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742223_1399 (size=5216) 2024-11-15T11:03:56,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742223_1399 (size=5216) 2024-11-15T11:03:56,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742223_1399 (size=5216) 2024-11-15T11:03:56,107 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/.tmp/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 2024-11-15T11:03:56,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/.tmp/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 2024-11-15T11:03:56,117 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38, entries=2, sequenceid=6, filesize=5.1 K 2024-11-15T11:03:56,118 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 83fdacd28f9e0e342ef078be16ea2663 in 106ms, sequenceid=6, compaction requested=false 2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for 83fdacd28f9e0e342ef078be16ea2663: 
2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. for snaptb0-testExportWithChecksum completed. 2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38] hfiles 2024-11-15T11:03:56,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 for snapshot=snaptb0-testExportWithChecksum 2024-11-15T11:03:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742224_1400 (size=107) 2024-11-15T11:03:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742224_1400 (size=107) 2024-11-15T11:03:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742224_1400 (size=107) 2024-11-15T11:03:56,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 
2024-11-15T11:03:56,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-15T11:03:56,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-15T11:03:56,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:56,144 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f 2024-11-15T11:03:56,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2764bea576bfa200194abca20722d84f in 287 msec 2024-11-15T11:03:56,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T11:03:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742225_1401 (size=107) 2024-11-15T11:03:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742225_1401 (size=107) 2024-11-15T11:03:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742225_1401 (size=107) 2024-11-15T11:03:56,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 
2024-11-15T11:03:56,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-15T11:03:56,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-15T11:03:56,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:56,169 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:03:56,175 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:03:56,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=183 2024-11-15T11:03:56,176 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:03:56,177 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:03:56,177 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-15T11:03:56,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83fdacd28f9e0e342ef078be16ea2663 in 312 msec 2024-11-15T11:03:56,178 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T11:03:56,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742226_1402 (size=621) 2024-11-15T11:03:56,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742226_1402 (size=621) 2024-11-15T11:03:56,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742226_1402 (size=621) 2024-11-15T11:03:56,226 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:03:56,234 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:03:56,234 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T11:03:56,236 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:03:56,236 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-15T11:03:56,238 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 397 msec 2024-11-15T11:03:56,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T11:03:56,466 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T11:03:56,466 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466 2024-11-15T11:03:56,466 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:56,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:03:56,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@3cf44f9e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T11:03:56,516 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:03:56,529 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T11:03:56,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:56,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:56,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-14938518435086948333.jar 2024-11-15T11:03:57,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-13476920684997529818.jar 2024-11-15T11:03:57,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
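[Editor's note] At this point the test launches ExportSnapshot to copy snaptb0-testExportWithChecksum from the source HDFS root (srcDir above) to a local file:/// destination (rawTgtDir above), after which TableMapReduceUtil starts resolving the dependency jars for the MapReduce job. A rough sketch of driving the same tool programmatically is shown below; the class name, destination path, and long-option spellings are assumptions modeled on the logged values and on the documented ExportSnapshot options, and should be checked against the HBase version actually in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder arguments modeled on the srcDir / local export destination seen in the log.
    String[] exportArgs = new String[] {
        "--snapshot", "snaptb0-testExportWithChecksum",
        "--copy-from", "hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7",
        "--copy-to", "file:///tmp/local-export"
    };
    // ExportSnapshot is a Hadoop Tool, so it can be run through ToolRunner,
    // which is what launches the snapshot-copy MapReduce job.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(exitCode);
  }
}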
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:03:57,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:03:57,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:03:57,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:03:57,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:03:57,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:03:57,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:03:57,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:03:57,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
2024-11-15T11:03:57,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:03:57,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:03:57,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:03:57,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:57,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:57,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:57,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:57,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:03:57,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:57,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:03:57,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742227_1403 (size=131440) 2024-11-15T11:03:57,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742227_1403 (size=131440) 2024-11-15T11:03:57,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is 
added to blk_1073742227_1403 (size=131440) 2024-11-15T11:03:57,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T11:03:57,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T11:03:57,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T11:03:57,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T11:03:57,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T11:03:57,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T11:03:57,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742230_1406 (size=903737) 2024-11-15T11:03:57,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742230_1406 (size=903737) 2024-11-15T11:03:57,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742230_1406 (size=903737) 2024-11-15T11:03:58,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T11:03:58,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T11:03:58,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T11:03:58,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T11:03:58,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T11:03:58,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T11:03:58,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742233_1409 (size=77835) 2024-11-15T11:03:58,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742233_1409 (size=77835) 2024-11-15T11:03:58,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742233_1409 (size=77835) 2024-11-15T11:03:58,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742234_1410 (size=30949) 2024-11-15T11:03:58,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39395 is added to blk_1073742234_1410 (size=30949) 2024-11-15T11:03:58,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742234_1410 (size=30949) 2024-11-15T11:03:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T11:03:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T11:03:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T11:03:58,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742236_1412 (size=4695811) 2024-11-15T11:03:58,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742236_1412 (size=4695811) 2024-11-15T11:03:58,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742236_1412 (size=4695811) 2024-11-15T11:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742237_1413 (size=232957) 2024-11-15T11:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742237_1413 (size=232957) 2024-11-15T11:03:58,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742237_1413 (size=232957) 2024-11-15T11:03:58,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742238_1414 (size=127628) 2024-11-15T11:03:58,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742238_1414 (size=127628) 2024-11-15T11:03:58,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742238_1414 (size=127628) 2024-11-15T11:03:58,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742239_1415 (size=20406) 2024-11-15T11:03:58,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742239_1415 (size=20406) 2024-11-15T11:03:58,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742239_1415 (size=20406) 2024-11-15T11:03:58,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742240_1416 (size=6424745) 2024-11-15T11:03:58,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742240_1416 (size=6424745) 2024-11-15T11:03:58,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742240_1416 (size=6424745) 2024-11-15T11:03:58,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38361 is added to blk_1073742241_1417 (size=5175431) 2024-11-15T11:03:58,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742241_1417 (size=5175431) 2024-11-15T11:03:58,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742241_1417 (size=5175431) 2024-11-15T11:03:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742242_1418 (size=217634) 2024-11-15T11:03:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742242_1418 (size=217634) 2024-11-15T11:03:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742242_1418 (size=217634) 2024-11-15T11:03:58,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742243_1419 (size=1832290) 2024-11-15T11:03:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742243_1419 (size=1832290) 2024-11-15T11:03:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742243_1419 (size=1832290) 2024-11-15T11:03:58,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742244_1420 (size=322274) 2024-11-15T11:03:58,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742244_1420 (size=322274) 2024-11-15T11:03:58,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742244_1420 (size=322274) 2024-11-15T11:03:58,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742245_1421 (size=503880) 2024-11-15T11:03:58,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742245_1421 (size=503880) 2024-11-15T11:03:58,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742245_1421 (size=503880) 2024-11-15T11:03:58,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742246_1422 (size=29229) 2024-11-15T11:03:58,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742246_1422 (size=29229) 2024-11-15T11:03:58,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742246_1422 (size=29229) 2024-11-15T11:03:58,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742247_1423 (size=440656) 2024-11-15T11:03:58,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742247_1423 (size=440656) 2024-11-15T11:03:58,236 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742247_1423 (size=440656) 2024-11-15T11:03:58,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742248_1424 (size=24096) 2024-11-15T11:03:58,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742248_1424 (size=24096) 2024-11-15T11:03:58,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742248_1424 (size=24096) 2024-11-15T11:03:58,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742249_1425 (size=111872) 2024-11-15T11:03:58,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742249_1425 (size=111872) 2024-11-15T11:03:58,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742249_1425 (size=111872) 2024-11-15T11:03:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742250_1426 (size=45609) 2024-11-15T11:03:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742250_1426 (size=45609) 2024-11-15T11:03:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742250_1426 (size=45609) 2024-11-15T11:03:58,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742251_1427 (size=136454) 2024-11-15T11:03:58,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742251_1427 (size=136454) 2024-11-15T11:03:58,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742251_1427 (size=136454) 2024-11-15T11:03:58,765 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
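The JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") is emitted when a MapReduce job is submitted without a job jar, so task JVMs may not find the user classes on their classpath. A minimal sketch of setting the job jar, assuming a hypothetical driver class and jar path that are not taken from this test run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "job-jar-sketch");
    // Name the jar containing the user classes explicitly...
    job.setJar("/path/to/user-classes.jar"); // hypothetical path
    // ...or let Hadoop locate the jar from a class it contains:
    // job.setJarByClass(JobJarSketch.class);
  }
}

In this log the warning appears benign: the earlier TableMapReduceUtil lines show the needed HBase and Hadoop jars being shipped with the job separately, so the missing job jar does not stop the export job from running.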
2024-11-15T11:03:58,768 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-15T11:03:58,770 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:03:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742252_1428 (size=338) 2024-11-15T11:03:58,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742252_1428 (size=338) 2024-11-15T11:03:58,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742252_1428 (size=338) 2024-11-15T11:03:58,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742253_1429 (size=15) 2024-11-15T11:03:58,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742253_1429 (size=15) 2024-11-15T11:03:58,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742253_1429 (size=15) 2024-11-15T11:03:58,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0007_000001 (auth:SIMPLE) from 127.0.0.1:59380 2024-11-15T11:03:58,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742254_1430 (size=303780) 2024-11-15T11:03:58,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742254_1430 (size=303780) 2024-11-15T11:03:58,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742254_1430 (size=303780) 2024-11-15T11:03:58,851 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0007/container_1731668480794_0007_01_000001/launch_container.sh] 2024-11-15T11:03:58,851 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0007/container_1731668480794_0007_01_000001/container_tokens] 2024-11-15T11:03:58,851 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_1/usercache/jenkins/appcache/application_1731668480794_0007/container_1731668480794_0007_01_000001/sysfs] 2024-11-15T11:03:58,862 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:03:58,862 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:03:58,867 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:36520 2024-11-15T11:03:59,978 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:04:03,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-15T11:04:03,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-15T11:04:03,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-15T11:04:04,747 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:48594 2024-11-15T11:04:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742255_1431 (size=349430) 2024-11-15T11:04:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742255_1431 (size=349430) 2024-11-15T11:04:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742255_1431 (size=349430) 2024-11-15T11:04:07,053 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:45330 2024-11-15T11:04:09,090 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:04:11,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
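The AbstractLeafQueue warnings above about maximum-am-resource-percent refer to the CapacityScheduler limit on how much of a queue ApplicationMasters may consume; on a small MiniMRCluster the default share can be too small for even one AM, so the scheduler logs the warning and skips enforcement, as the message says. A rough sketch of raising that share, assuming the stock property name yarn.scheduler.capacity.maximum-am-resource-percent and an illustrative value of 0.5 that is not taken from this test's configuration:

import org.apache.hadoop.conf.Configuration;

public class AmShareSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow ApplicationMasters to use up to 50% of a queue's capacity
    // (the YARN default is 0.1), which avoids the warning on tiny test clusters.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(conf.getFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.1f));
  }
}

Because enforcement is skipped, the warning does not block the application here; tuning the property only matters when the scheduler actually enforces the limit.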
2024-11-15T11:04:12,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000002/launch_container.sh] 2024-11-15T11:04:12,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000002/container_tokens] 2024-11-15T11:04:12,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466/archive/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T11:04:13,864 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:43182 2024-11-15T11:04:15,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 26514d1e32bf69240f59f1d352a7ad1d, had cached 0 bytes from a total of 5566 2024-11-15T11:04:15,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 62aaee921674ca29eb5bfbb30b16733a, had cached 0 bytes from a total of 8052 2024-11-15T11:04:17,770 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_3/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000003/launch_container.sh] 2024-11-15T11:04:17,770 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_3/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000003/container_tokens] 2024-11-15T11:04:17,770 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_3/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000003/sysfs] 2024-11-15T11:04:18,043 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 83fdacd28f9e0e342ef078be16ea2663 changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:04:18,043 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 26514d1e32bf69240f59f1d352a7ad1d changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:04:18,043 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2764bea576bfa200194abca20722d84f changed from -1.0 to 0.0, refreshing cache 2024-11-15T11:04:18,043 DEBUG [master/4cdab6d79b20:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 
62aaee921674ca29eb5bfbb30b16733a changed from -1.0 to 0.0, refreshing cache Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466/archive/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T11:04:18,884 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:50328 2024-11-15T11:04:22,429 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000004/launch_container.sh] 2024-11-15T11:04:22,429 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000004/container_tokens] 2024-11-15T11:04:22,430 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/local-export-1731668636466/archive/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T11:04:23,897 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:48490 2024-11-15T11:04:27,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742256_1432 (size=21330) 2024-11-15T11:04:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742256_1432 (size=21330) 2024-11-15T11:04:27,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742256_1432 (size=21330) 2024-11-15T11:04:27,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742257_1433 (size=460) 2024-11-15T11:04:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742257_1433 (size=460) 2024-11-15T11:04:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742257_1433 (size=460) 2024-11-15T11:04:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742258_1434 (size=21330) 2024-11-15T11:04:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742258_1434 (size=21330) 2024-11-15T11:04:27,476 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742258_1434 (size=21330) 2024-11-15T11:04:27,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742259_1435 (size=349430) 2024-11-15T11:04:27,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742259_1435 (size=349430) 2024-11-15T11:04:27,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742259_1435 (size=349430) 2024-11-15T11:04:27,522 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000005/launch_container.sh] 2024-11-15T11:04:27,522 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000005/container_tokens] 2024-11-15T11:04:27,522 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000005/sysfs] 2024-11-15T11:04:27,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:48496 2024-11-15T11:04:29,292 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1731668480794_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
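The checksum-mismatch failures above arise because, as the error text states, the source filesystem (hdfs://) and the export target (file:) are of different types, so their default checksum algorithms are not comparable. The message itself names two remedies: file-level COMPOSITE_CRC validation or skipping verification. A rough sketch of re-running the export with those options, where the snapshot name is taken from the log but the destination URI and exact flag spellings are assumptions to be checked against your HBase version's ExportSnapshot usage output:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithCompositeCrc {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum comparison that works across filesystems and block sizes,
    // per the hint in the error message (-Ddfs.checksum.combine.mode=COMPOSITE_CRC).
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum", // snapshot name from this log
        "-copy-to", "file:///tmp/snapshot-export"      // placeholder destination URI
        // Alternatively, skip verification entirely (risks masking corruption):
        // "-no-checksum-verify"
    });
    System.exit(rc);
  }
}

With COMPOSITE_CRC the comparison is a block-size-independent file-level CRC, while -no-checksum-verify skips the check altogether at the cost of possibly masking corruption during transfer, as the log message warns. The next log entries show the test retrying the same export against an HDFS destination, where the source and target checksum types match.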
2024-11-15T11:04:29,293 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293 2024-11-15T11:04:29,293 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:29,322 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:29,322 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T11:04:29,324 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:04:29,329 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T11:04:29,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742260_1436 (size=156) 2024-11-15T11:04:29,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742260_1436 (size=156) 2024-11-15T11:04:29,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742260_1436 (size=156) 2024-11-15T11:04:29,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742261_1437 (size=621) 2024-11-15T11:04:29,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742261_1437 (size=621) 2024-11-15T11:04:29,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742261_1437 (size=621) 2024-11-15T11:04:29,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:29,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:29,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-5830996286201075900.jar 2024-11-15T11:04:30,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-16146321742715613653.jar 2024-11-15T11:04:30,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:30,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:04:30,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:04:30,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:04:30,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:04:30,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:04:30,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:04:30,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-15T11:04:30,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:30,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:30,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:30,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:30,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:30,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:30,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742262_1438 (size=131440) 2024-11-15T11:04:30,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742262_1438 (size=131440) 2024-11-15T11:04:30,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742262_1438 (size=131440) 2024-11-15T11:04:30,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T11:04:30,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T11:04:30,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T11:04:30,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T11:04:30,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T11:04:30,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T11:04:30,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38361 is added to blk_1073742265_1441 (size=903737) 2024-11-15T11:04:30,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742265_1441 (size=903737) 2024-11-15T11:04:30,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742265_1441 (size=903737) 2024-11-15T11:04:30,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T11:04:30,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T11:04:30,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T11:04:30,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742267_1443 (size=440656) 2024-11-15T11:04:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742267_1443 (size=440656) 2024-11-15T11:04:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742267_1443 (size=440656) 2024-11-15T11:04:30,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742268_1444 (size=1877034) 2024-11-15T11:04:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742268_1444 (size=1877034) 2024-11-15T11:04:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742268_1444 (size=1877034) 2024-11-15T11:04:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742269_1445 (size=77835) 2024-11-15T11:04:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742269_1445 (size=77835) 2024-11-15T11:04:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742269_1445 (size=77835) 2024-11-15T11:04:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742270_1446 (size=30949) 2024-11-15T11:04:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742270_1446 (size=30949) 2024-11-15T11:04:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742270_1446 (size=30949) 2024-11-15T11:04:30,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742271_1447 (size=1597327) 2024-11-15T11:04:30,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742271_1447 (size=1597327) 2024-11-15T11:04:30,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742271_1447 (size=1597327) 2024-11-15T11:04:30,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742272_1448 (size=4695811) 2024-11-15T11:04:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742272_1448 (size=4695811) 2024-11-15T11:04:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742272_1448 (size=4695811) 2024-11-15T11:04:30,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742273_1449 (size=232957) 2024-11-15T11:04:30,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742273_1449 (size=232957) 2024-11-15T11:04:30,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742273_1449 (size=232957) 2024-11-15T11:04:30,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742274_1450 (size=127628) 2024-11-15T11:04:30,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742274_1450 (size=127628) 2024-11-15T11:04:30,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742274_1450 (size=127628) 2024-11-15T11:04:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742275_1451 (size=20406) 2024-11-15T11:04:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742275_1451 (size=20406) 2024-11-15T11:04:30,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742275_1451 (size=20406) 2024-11-15T11:04:30,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742276_1452 (size=5175431) 2024-11-15T11:04:30,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742276_1452 (size=5175431) 2024-11-15T11:04:30,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742276_1452 (size=5175431) 2024-11-15T11:04:30,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742277_1453 (size=217634) 2024-11-15T11:04:30,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742277_1453 (size=217634) 2024-11-15T11:04:30,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742277_1453 (size=217634) 2024-11-15T11:04:30,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742278_1454 (size=1832290) 2024-11-15T11:04:30,813 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742278_1454 (size=1832290) 2024-11-15T11:04:30,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742278_1454 (size=1832290) 2024-11-15T11:04:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742279_1455 (size=322274) 2024-11-15T11:04:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742279_1455 (size=322274) 2024-11-15T11:04:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742279_1455 (size=322274) 2024-11-15T11:04:30,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742280_1456 (size=503880) 2024-11-15T11:04:30,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742280_1456 (size=503880) 2024-11-15T11:04:30,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742280_1456 (size=503880) 2024-11-15T11:04:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742281_1457 (size=6424745) 2024-11-15T11:04:30,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742281_1457 (size=6424745) 2024-11-15T11:04:30,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742281_1457 (size=6424745) 2024-11-15T11:04:30,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742282_1458 (size=29229) 2024-11-15T11:04:30,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742282_1458 (size=29229) 2024-11-15T11:04:30,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742282_1458 (size=29229) 2024-11-15T11:04:30,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742283_1459 (size=24096) 2024-11-15T11:04:30,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742283_1459 (size=24096) 2024-11-15T11:04:30,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742283_1459 (size=24096) 2024-11-15T11:04:30,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742284_1460 (size=111872) 2024-11-15T11:04:30,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742284_1460 (size=111872) 2024-11-15T11:04:30,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742284_1460 (size=111872) 2024-11-15T11:04:30,977 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742285_1461 (size=45609) 2024-11-15T11:04:30,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742285_1461 (size=45609) 2024-11-15T11:04:30,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742285_1461 (size=45609) 2024-11-15T11:04:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742286_1462 (size=136454) 2024-11-15T11:04:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742286_1462 (size=136454) 2024-11-15T11:04:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742286_1462 (size=136454) 2024-11-15T11:04:30,987 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T11:04:30,989 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-15T11:04:30,992 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:04:31,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742287_1463 (size=338) 2024-11-15T11:04:31,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742287_1463 (size=338) 2024-11-15T11:04:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742287_1463 (size=338) 2024-11-15T11:04:31,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742288_1464 (size=15) 2024-11-15T11:04:31,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742288_1464 (size=15) 2024-11-15T11:04:31,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742288_1464 (size=15) 2024-11-15T11:04:31,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742289_1465 (size=303730) 2024-11-15T11:04:31,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742289_1465 (size=303730) 2024-11-15T11:04:31,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742289_1465 (size=303730) 2024-11-15T11:04:33,649 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:04:33,650 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:04:33,654 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0008_000001 (auth:SIMPLE) from 127.0.0.1:46038 2024-11-15T11:04:33,665 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000001/launch_container.sh] 2024-11-15T11:04:33,665 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000001/container_tokens] 2024-11-15T11:04:33,665 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-1_1/usercache/jenkins/appcache/application_1731668480794_0008/container_1731668480794_0008_01_000001/sysfs] 2024-11-15T11:04:34,480 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0009_000001 (auth:SIMPLE) from 127.0.0.1:49728 2024-11-15T11:04:39,735 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0009_000001 (auth:SIMPLE) from 127.0.0.1:43260 2024-11-15T11:04:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742290_1466 (size=349380) 2024-11-15T11:04:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742290_1466 (size=349380) 2024-11-15T11:04:39,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742290_1466 (size=349380) 2024-11-15T11:04:40,220 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2764bea576bfa200194abca20722d84f, had cached 0 bytes from a total of 8394 2024-11-15T11:04:40,220 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83fdacd28f9e0e342ef078be16ea2663, had cached 0 bytes from a total of 5216 2024-11-15T11:04:41,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T11:04:41,991 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0009_000001 (auth:SIMPLE) from 127.0.0.1:52916 2024-11-15T11:04:46,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742291_1467 (size=8394) 2024-11-15T11:04:46,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742291_1467 (size=8394) 2024-11-15T11:04:46,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742291_1467 (size=8394) 2024-11-15T11:04:46,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742292_1468 (size=5216) 2024-11-15T11:04:46,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742292_1468 (size=5216) 2024-11-15T11:04:46,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742292_1468 (size=5216) 2024-11-15T11:04:46,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742293_1469 (size=17413) 2024-11-15T11:04:46,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742293_1469 (size=17413) 2024-11-15T11:04:46,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742293_1469 (size=17413) 2024-11-15T11:04:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742294_1470 (size=462) 2024-11-15T11:04:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742294_1470 (size=462) 2024-11-15T11:04:46,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742294_1470 (size=462) 2024-11-15T11:04:46,386 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000002/launch_container.sh] 2024-11-15T11:04:46,386 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000002/container_tokens] 2024-11-15T11:04:46,386 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000002/sysfs] 
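[Editor's note, not part of the captured log] The records above trace the MapReduce job that the ExportSnapshot tool submits for 'snaptb0-testExportWithChecksum': dependency jars resolved by TableMapReduceUtil, job resources replicated as HDFS blocks, and YARN containers launched and cleaned up; the "Export Completed" record follows just below. As a point of reference only, a minimal sketch of driving the same tool programmatically is shown here; the destination URI is hypothetical and everything else follows the documented ExportSnapshot options.

// Illustrative sketch only -- not taken from this log. ExportSnapshot implements
// org.apache.hadoop.util.Tool, so it can be driven through ToolRunner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",          // snapshot name as seen in the log
        "-copy-to", "hdfs://namenode.example.com:8020/backup"   // hypothetical export target
    });
    System.exit(exitCode);
  }
}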
2024-11-15T11:04:46,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742295_1471 (size=17413) 2024-11-15T11:04:46,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742295_1471 (size=17413) 2024-11-15T11:04:46,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742295_1471 (size=17413) 2024-11-15T11:04:46,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742296_1472 (size=349380) 2024-11-15T11:04:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742296_1472 (size=349380) 2024-11-15T11:04:46,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742296_1472 (size=349380) 2024-11-15T11:04:46,450 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0009_000001 (auth:SIMPLE) from 127.0.0.1:52926 2024-11-15T11:04:48,449 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:04:48,450 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-15T11:04:48,455 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-15T11:04:48,455 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:04:48,456 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:04:48,456 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T11:04:48,456 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-15T11:04:48,456 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-15T11:04:48,456 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T11:04:48,457 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-15T11:04:48,457 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668669293/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-15T11:04:48,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-15T11:04:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T11:04:48,465 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668688465"}]},"ts":"1731668688465"} 2024-11-15T11:04:48,467 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-15T11:04:48,467 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-15T11:04:48,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-15T11:04:48,469 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, UNASSIGN}] 2024-11-15T11:04:48,469 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, UNASSIGN 2024-11-15T11:04:48,469 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, UNASSIGN 2024-11-15T11:04:48,470 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=83fdacd28f9e0e342ef078be16ea2663, regionState=CLOSING, regionLocation=4cdab6d79b20,36113,1731668474097 2024-11-15T11:04:48,470 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=2764bea576bfa200194abca20722d84f, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:04:48,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=83fdacd28f9e0e342ef078be16ea2663, UNASSIGN because future has completed 2024-11-15T11:04:48,472 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:04:48,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097}] 2024-11-15T11:04:48,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, UNASSIGN because future has completed 2024-11-15T11:04:48,474 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:04:48,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:04:48,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T11:04:48,625 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(122): Close 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:04:48,642 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing 83fdacd28f9e0e342ef078be16ea2663, disabling compactions & flushes 2024-11-15T11:04:48,643 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. after waiting 0 ms 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 
2024-11-15T11:04:48,643 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close 2764bea576bfa200194abca20722d84f 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:04:48,643 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing 2764bea576bfa200194abca20722d84f, disabling compactions & flushes 2024-11-15T11:04:48,643 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:04:48,644 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:04:48,644 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. after waiting 0 ms 2024-11-15T11:04:48,644 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:04:48,649 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:04:48,650 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:04:48,650 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663. 
2024-11-15T11:04:48,650 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for 83fdacd28f9e0e342ef078be16ea2663: Waiting for close lock at 1731668688643Running coprocessor pre-close hooks at 1731668688643Disabling compacts and flushes for region at 1731668688643Disabling writes for close at 1731668688643Writing region close event to WAL at 1731668688644 (+1 ms)Running coprocessor post-close hooks at 1731668688650 (+6 ms)Closed at 1731668688650 2024-11-15T11:04:48,652 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed 83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:04:48,653 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:04:48,653 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=83fdacd28f9e0e342ef078be16ea2663, regionState=CLOSED 2024-11-15T11:04:48,654 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:04:48,654 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f. 2024-11-15T11:04:48,654 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for 2764bea576bfa200194abca20722d84f: Waiting for close lock at 1731668688643Running coprocessor pre-close hooks at 1731668688643Disabling compacts and flushes for region at 1731668688643Disabling writes for close at 1731668688644 (+1 ms)Writing region close event to WAL at 1731668688648 (+4 ms)Running coprocessor post-close hooks at 1731668688653 (+5 ms)Closed at 1731668688654 (+1 ms) 2024-11-15T11:04:48,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097 because future has completed 2024-11-15T11:04:48,656 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed 2764bea576bfa200194abca20722d84f 2024-11-15T11:04:48,656 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=2764bea576bfa200194abca20722d84f, regionState=CLOSED 2024-11-15T11:04:48,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-15T11:04:48,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure 83fdacd28f9e0e342ef078be16ea2663, server=4cdab6d79b20,36113,1731668474097 in 184 msec 2024-11-15T11:04:48,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, 
region=83fdacd28f9e0e342ef078be16ea2663, UNASSIGN in 190 msec 2024-11-15T11:04:48,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:04:48,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-15T11:04:48,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 2764bea576bfa200194abca20722d84f, server=4cdab6d79b20,39207,1731668474152 in 187 msec 2024-11-15T11:04:48,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=187 2024-11-15T11:04:48,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2764bea576bfa200194abca20722d84f, UNASSIGN in 193 msec 2024-11-15T11:04:48,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-15T11:04:48,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 198 msec 2024-11-15T11:04:48,668 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668688667"}]},"ts":"1731668688667"} 2024-11-15T11:04:48,669 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-15T11:04:48,669 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-15T11:04:48,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 210 msec 2024-11-15T11:04:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T11:04:48,786 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T11:04:48,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-15T11:04:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,788 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-15T11:04:48,789 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,791 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-15T11:04:48,793 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:04:48,793 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f 2024-11-15T11:04:48,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T11:04:48,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T11:04:48,795 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/recovered.edits] 2024-11-15T11:04:48,795 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/recovered.edits] 2024-11-15T11:04:48,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T11:04:48,796 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T11:04:48,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:48,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:48,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T11:04:48,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:48,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:48,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-15T11:04:48,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:48,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:48,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:48,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 
\x04 2024-11-15T11:04:48,802 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/cf/8d8efe2d44024bc0a4ab1fd0dee9ea38 2024-11-15T11:04:48,803 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/cf/da0ba667545d41beb17cf3f3341eecc0 2024-11-15T11:04:48,805 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663/recovered.edits/9.seqid 2024-11-15T11:04:48,805 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/83fdacd28f9e0e342ef078be16ea2663 2024-11-15T11:04:48,805 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f/recovered.edits/9.seqid 2024-11-15T11:04:48,806 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportWithChecksum/2764bea576bfa200194abca20722d84f 2024-11-15T11:04:48,806 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-15T11:04:48,808 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,811 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-15T11:04:48,813 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-15T11:04:48,814 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,814 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
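[Editor's note, not part of the captured log] The records above and immediately below show the test tearing down testtb-testExportWithChecksum: DisableTableProcedure (pid=186), DeleteTableProcedure (pid=192) with HFile archiving, and deletion of the two snapshots. A minimal sketch of the client-side Admin calls that trigger these server-side procedures, assuming a reachable cluster in the default configuration:

// Illustrative sketch only -- not taken from this log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // server runs a DisableTableProcedure (pid=186 in the log)
      admin.deleteTable(table);    // server runs a DeleteTableProcedure (pid=192 in the log)
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}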
2024-11-15T11:04:48,814 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668688814"}]},"ts":"9223372036854775807"} 2024-11-15T11:04:48,814 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668688814"}]},"ts":"9223372036854775807"} 2024-11-15T11:04:48,816 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:04:48,816 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 83fdacd28f9e0e342ef078be16ea2663, NAME => 'testtb-testExportWithChecksum,,1731668634784.83fdacd28f9e0e342ef078be16ea2663.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2764bea576bfa200194abca20722d84f, NAME => 'testtb-testExportWithChecksum,1,1731668634784.2764bea576bfa200194abca20722d84f.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:04:48,816 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-11-15T11:04:48,817 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668688817"}]},"ts":"9223372036854775807"} 2024-11-15T11:04:48,818 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-15T11:04:48,819 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T11:04:48,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 33 msec 2024-11-15T11:04:48,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-15T11:04:48,906 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-15T11:04:48,906 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T11:04:48,911 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-15T11:04:48,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-15T11:04:48,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-15T11:04:48,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-15T11:04:48,938 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=803 (was 
805), OpenFileDescriptor=801 (was 812), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=407 (was 410), ProcessCount=17 (was 17), AvailableMemoryMB=3128 (was 2542) - AvailableMemoryMB LEAK? - 2024-11-15T11:04:48,938 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-15T11:04:48,958 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=803, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=407, ProcessCount=17, AvailableMemoryMB=3127 2024-11-15T11:04:48,958 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-15T11:04:48,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T11:04:48,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:48,962 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T11:04:48,962 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:04:48,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-15T11:04:48,963 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T11:04:48,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T11:04:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742297_1473 (size=418) 2024-11-15T11:04:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742297_1473 (size=418) 2024-11-15T11:04:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742297_1473 (size=418) 2024-11-15T11:04:48,973 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cde4df773c8afc6c85f88e9dd9d6ef19, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.', STARTKEY => '', ENDKEY 
=> '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:48,973 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b0790a154cd09f85d2547b019649c6df, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:48,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742299_1475 (size=79) 2024-11-15T11:04:48,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742299_1475 (size=79) 2024-11-15T11:04:48,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742299_1475 (size=79) 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing cde4df773c8afc6c85f88e9dd9d6ef19, disabling compactions & flushes 2024-11-15T11:04:48,987 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 
after waiting 0 ms 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:48,987 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:48,987 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for cde4df773c8afc6c85f88e9dd9d6ef19: Waiting for close lock at 1731668688987Disabling compacts and flushes for region at 1731668688987Disabling writes for close at 1731668688987Writing region close event to WAL at 1731668688987Closed at 1731668688987 2024-11-15T11:04:48,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742298_1474 (size=79) 2024-11-15T11:04:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742298_1474 (size=79) 2024-11-15T11:04:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742298_1474 (size=79) 2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing b0790a154cd09f85d2547b019649c6df, disabling compactions & flushes 2024-11-15T11:04:48,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. after waiting 0 ms 2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:48,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
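
Note: the entries above trace the master-side CreateTableProcedure (pid=193) for 'testtb-testExportFileSystemStateWithSkipTmp': pre-operation, then CREATE_TABLE_WRITE_FS_LAYOUT, where the two regions (STARTKEY ''..'1' and '1'..'') are initialized and closed again on disk. As a hedged illustration only, a client-side request that would drive this same procedure through the HBase Admin API could look roughly like the sketch below; the table name, the single family 'cf' with VERSIONS => '1', and the '1' split key are taken from the log, while the class name and the connection setup are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSkipTmpTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // Single column family 'cf' with max one version, matching the descriptor in the log;
          // all other attributes are left at their defaults.
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // Pre-split at row key '1' so two regions are created, matching the
          // RegionOpenAndInit entries for cde4df77... and b0790a15... above.
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
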
2024-11-15T11:04:48,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for b0790a154cd09f85d2547b019649c6df: Waiting for close lock at 1731668688989Disabling compacts and flushes for region at 1731668688989Disabling writes for close at 1731668688989Writing region close event to WAL at 1731668688989Closed at 1731668688989 2024-11-15T11:04:48,990 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T11:04:48,991 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731668688990"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668688990"}]},"ts":"1731668688990"} 2024-11-15T11:04:48,991 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731668688990"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731668688990"}]},"ts":"1731668688990"} 2024-11-15T11:04:48,994 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T11:04:48,995 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T11:04:48,995 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668688995"}]},"ts":"1731668688995"} 2024-11-15T11:04:48,997 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-15T11:04:48,997 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {4cdab6d79b20=0} racks are {/default-rack=0} 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T11:04:48,998 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T11:04:48,998 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T11:04:48,999 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T11:04:48,999 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T11:04:48,999 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T11:04:48,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, ASSIGN}] 2024-11-15T11:04:49,000 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, ASSIGN 2024-11-15T11:04:49,000 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, ASSIGN 2024-11-15T11:04:49,001 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, ASSIGN; state=OFFLINE, location=4cdab6d79b20,39207,1731668474152; forceNewPlan=false, retain=false 2024-11-15T11:04:49,001 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, ASSIGN; state=OFFLINE, location=4cdab6d79b20,45865,1731668473964; forceNewPlan=false, retain=false 2024-11-15T11:04:49,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T11:04:49,151 INFO [4cdab6d79b20:33465 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T11:04:49,152 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=cde4df773c8afc6c85f88e9dd9d6ef19, regionState=OPENING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:04:49,152 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=b0790a154cd09f85d2547b019649c6df, regionState=OPENING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:04:49,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, ASSIGN because future has completed 2024-11-15T11:04:49,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:04:49,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, ASSIGN because future has completed 2024-11-15T11:04:49,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0790a154cd09f85d2547b019649c6df, server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:04:49,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T11:04:49,309 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:49,310 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:49,310 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => cde4df773c8afc6c85f88e9dd9d6ef19, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T11:04:49,310 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => b0790a154cd09f85d2547b019649c6df, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T11:04:49,310 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
service=AccessControlService 2024-11-15T11:04:49,310 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. service=AccessControlService 2024-11-15T11:04:49,310 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:04:49,310 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T11:04:49,310 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,311 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,312 INFO [StoreOpener-cde4df773c8afc6c85f88e9dd9d6ef19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,312 INFO [StoreOpener-b0790a154cd09f85d2547b019649c6df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,313 INFO [StoreOpener-cde4df773c8afc6c85f88e9dd9d6ef19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cde4df773c8afc6c85f88e9dd9d6ef19 columnFamilyName cf 2024-11-15T11:04:49,313 DEBUG [StoreOpener-cde4df773c8afc6c85f88e9dd9d6ef19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:04:49,313 INFO [StoreOpener-b0790a154cd09f85d2547b019649c6df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0790a154cd09f85d2547b019649c6df columnFamilyName cf 2024-11-15T11:04:49,313 DEBUG [StoreOpener-b0790a154cd09f85d2547b019649c6df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T11:04:49,314 INFO [StoreOpener-cde4df773c8afc6c85f88e9dd9d6ef19-1 {}] regionserver.HStore(327): Store=cde4df773c8afc6c85f88e9dd9d6ef19/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:04:49,314 INFO [StoreOpener-b0790a154cd09f85d2547b019649c6df-1 {}] regionserver.HStore(327): Store=b0790a154cd09f85d2547b019649c6df/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T11:04:49,314 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,314 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,315 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19 
2024-11-15T11:04:49,315 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,315 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,315 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,316 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,316 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,316 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,316 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,317 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,317 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,320 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:04:49,320 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T11:04:49,320 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened b0790a154cd09f85d2547b019649c6df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65838480, jitterRate=-0.01893019676208496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:04:49,320 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 
{event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,320 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened cde4df773c8afc6c85f88e9dd9d6ef19; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61217724, jitterRate=-0.08778482675552368}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T11:04:49,320 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,321 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for b0790a154cd09f85d2547b019649c6df: Running coprocessor pre-open hook at 1731668689311Writing region info on filesystem at 1731668689311Initializing all the Stores at 1731668689311Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668689311Cleaning up temporary data from old regions at 1731668689316 (+5 ms)Running coprocessor post-open hooks at 1731668689320 (+4 ms)Region opened successfully at 1731668689321 (+1 ms) 2024-11-15T11:04:49,321 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for cde4df773c8afc6c85f88e9dd9d6ef19: Running coprocessor pre-open hook at 1731668689311Writing region info on filesystem at 1731668689311Initializing all the Stores at 1731668689312 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731668689312Cleaning up temporary data from old regions at 1731668689316 (+4 ms)Running coprocessor post-open hooks at 1731668689320 (+4 ms)Region opened successfully at 1731668689321 (+1 ms) 2024-11-15T11:04:49,322 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19., pid=196, masterSystemTime=1731668689306 2024-11-15T11:04:49,322 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., pid=197, masterSystemTime=1731668689307 2024-11-15T11:04:49,324 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 
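
Note: at this point the two OpenRegionProcedure children (pid=196 and pid=197) have opened both regions on their region servers, and the test later waits until every region of the table is assigned. As a small illustrative sketch (the class and method names here are hypothetical, not part of the test), the same check can be made from a client by asking the RegionLocator for all region locations and confirming each one has a hosting server:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class CheckAssignment {
      // Returns true once every region of the table reports a hosting server name.
      static boolean allRegionsAssigned(Connection conn, TableName tn) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            if (loc == null || loc.getServerName() == null) {
              return false;
            }
          }
          return true;
        }
      }
    }
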
2024-11-15T11:04:49,324 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:49,325 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=cde4df773c8afc6c85f88e9dd9d6ef19, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:04:49,325 DEBUG [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:49,325 INFO [RS_OPEN_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:49,326 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=b0790a154cd09f85d2547b019649c6df, regionState=OPEN, openSeqNum=2, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:04:49,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:04:49,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0790a154cd09f85d2547b019649c6df, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:04:49,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=194 2024-11-15T11:04:49,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152 in 175 msec 2024-11-15T11:04:49,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-15T11:04:49,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure b0790a154cd09f85d2547b019649c6df, server=4cdab6d79b20,45865,1731668473964 in 175 msec 2024-11-15T11:04:49,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, ASSIGN in 332 msec 2024-11-15T11:04:49,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=193 2024-11-15T11:04:49,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, ASSIGN in 333 msec 2024-11-15T11:04:49,334 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T11:04:49,334 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668689334"}]},"ts":"1731668689334"} 2024-11-15T11:04:49,336 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-15T11:04:49,337 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T11:04:49,337 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-15T11:04:49,340 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T11:04:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:04:49,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T11:04:49,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 386 msec 2024-11-15T11:04:49,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T11:04:49,596 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T11:04:49,596 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-15T11:04:49,596 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:04:49,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-15T11:04:49,601 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:04:49,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-15T11:04:49,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T11:04:49,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T11:04:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668689604 (current time:1731668689604). 
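
Note: with the CreateTableProcedure finished (386 msec), the master has just received a FLUSH-type snapshot request named emptySnaptb0-testExportFileSystemStateWithSkipTmp for the new table. On the client side this corresponds to an Admin.snapshot call; a minimal hedged sketch follows, with the snapshot and table names taken from the log and the surrounding class assumed to already hold an Admin instance:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class TakeEmptySnapshot {
      // Issues the same kind of FLUSH snapshot request that the master logs above.
      static void takeSnapshot(Admin admin) throws IOException {
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
            TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
            SnapshotType.FLUSH);
      }
    }
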
2024-11-15T11:04:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:04:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-15T11:04:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:04:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e58ac7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:04:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:04:49,606 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:04:49,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:04:49,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:04:49,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29bf9048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:04:49,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:04:49,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,608 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:04:49,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@85e7387, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:04:49,610 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:04:49,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:04:49,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48622, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:04:49,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:04:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:04:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:04:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@377abb08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:04:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:04:49,614 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4545a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:04:49,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,615 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:04:49,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8b1a9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:04:49,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:04:49,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:04:49,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:04:49,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:04:49,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:04:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:04:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:49,621 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:04:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T11:04:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:04:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T11:04:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-15T11:04:49,624 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:04:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T11:04:49,625 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:04:49,627 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:04:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742300_1476 (size=203) 2024-11-15T11:04:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742300_1476 (size=203) 2024-11-15T11:04:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742300_1476 (size=203) 2024-11-15T11:04:49,634 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:04:49,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19}, {pid=200, ppid=198, state=RUNNABLE, 
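
Note: the "Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]" entries show the master reading the table's stored permissions while preparing the snapshot description, before SnapshotProcedure pid=198 is registered. For reference only, the same permissions can be listed from a client with the AccessControlClient helper; the class and method names below are hypothetical wrappers around that call:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public final class ShowTableAcl {
      // Prints every user permission stored for the table, e.g. the jenkins RWXCA grant above.
      static void printAcl(Connection conn) throws Throwable {
        List<UserPermission> perms = AccessControlClient.getUserPermissions(
            conn, "testtb-testExportFileSystemStateWithSkipTmp");
        for (UserPermission p : perms) {
          System.out.println(p);
        }
      }
    }
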
hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df}] 2024-11-15T11:04:49,635 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,635 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:49,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T11:04:49,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-15T11:04:49,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for b0790a154cd09f85d2547b019649c6df: 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for cde4df773c8afc6c85f88e9dd9d6ef19: 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:04:49,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T11:04:49,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742301_1477 (size=82) 2024-11-15T11:04:49,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742301_1477 (size=82) 2024-11-15T11:04:49,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742301_1477 (size=82) 2024-11-15T11:04:49,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742302_1478 (size=82) 2024-11-15T11:04:49,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742302_1478 (size=82) 2024-11-15T11:04:49,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742302_1478 (size=82) 2024-11-15T11:04:49,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
2024-11-15T11:04:49,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-15T11:04:49,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-15T11:04:49,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,807 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:49,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df in 174 msec 2024-11-15T11:04:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T11:04:50,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:50,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-15T11:04:50,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-15T11:04:50,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:50,202 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:50,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-11-15T11:04:50,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 in 574 msec 2024-11-15T11:04:50,211 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:04:50,212 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:04:50,213 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:04:50,213 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:50,214 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:50,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742303_1479 (size=585) 2024-11-15T11:04:50,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742303_1479 (size=585) 2024-11-15T11:04:50,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742303_1479 (size=585) 2024-11-15T11:04:50,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T11:04:50,266 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:04:50,273 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:04:50,273 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:50,275 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:04:50,275 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-15T11:04:50,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 653 msec 2024-11-15T11:04:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T11:04:50,766 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T11:04:50,771 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='0e546ae6dafefaa0603ce58037eb9fdfe', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:04:50,775 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='16fb286a9f0197961d37435894d095680', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:04:50,776 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2d28b61e0e29187a0cf5dc81304a48572', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:04:50,781 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='3ce03c94c2eb7da28b479bdb1878408cf', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:04:50,782 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4a4ce2d8ed017129aeef4b71520257ce3', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:04:50,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T11:04:50,791 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='5326f152a81c728266d3d42859d36c181', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df., hostname=4cdab6d79b20,45865,1731668473964, seqNum=2] 2024-11-15T11:04:50,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39207 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-15T11:04:50,798 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T11:04:50,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:50,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:04:50,801 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T11:04:50,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T11:04:50,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T11:04:50,815 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T11:04:50,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T11:04:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731668690818 (current time:1731668690818). 
2024-11-15T11:04:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T11:04:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-15T11:04:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T11:04:50,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72bd96f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:04:50,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:04:50,822 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:04:50,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:04:50,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:04:50,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15aaf898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:04:50,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:04:50,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,825 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:04:50,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e609c88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:04:50,827 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:04:50,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:04:50,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57980, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T11:04:50,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:04:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:04:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,830 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T11:04:50,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@493bcece, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ClusterIdFetcher(90): Going to request 4cdab6d79b20,33465,-1 for getting cluster id 2024-11-15T11:04:50,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T11:04:50,833 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eec1d4a-af98-41cc-ba5a-34a7b19499cc' 2024-11-15T11:04:50,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T11:04:50,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eec1d4a-af98-41cc-ba5a-34a7b19499cc" 2024-11-15T11:04:50,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@744d345e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4cdab6d79b20,33465,-1] 2024-11-15T11:04:50,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T11:04:50,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,835 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T11:04:50,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76495d01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T11:04:50,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T11:04:50,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4cdab6d79b20,39207,1731668474152, seqNum=-1] 2024-11-15T11:04:50,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T11:04:50,839 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57982, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T11:04:50,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., hostname=4cdab6d79b20,39207,1731668474152, seqNum=2] 2024-11-15T11:04:50,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465. 2024-11-15T11:04:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T11:04:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:04:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T11:04:50,844 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:04:50,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T11:04:50,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T11:04:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-15T11:04:50,847 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T11:04:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T11:04:50,848 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T11:04:50,851 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T11:04:50,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742304_1480 (size=198) 2024-11-15T11:04:50,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742304_1480 (size=198) 2024-11-15T11:04:50,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742304_1480 (size=198) 2024-11-15T11:04:50,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T11:04:50,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df}] 2024-11-15T11:04:50,902 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:50,903 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:50,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T11:04:51,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-15T11:04:51,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:04:51,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39207 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-15T11:04:51,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing b0790a154cd09f85d2547b019649c6df 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T11:04:51,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 
2024-11-15T11:04:51,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing cde4df773c8afc6c85f88e9dd9d6ef19 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T11:04:51,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/.tmp/cf/dee3537e8f7040708890ddf223a54d3e is 71, key is 10206988c80af69cd491a419c99cded6/cf:q/1731668690786/Put/seqid=0 2024-11-15T11:04:51,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/.tmp/cf/63c91c6c59314b6da4f885b0f9101d0d is 71, key is 003fdcf16613ae0129cfa568ab97cb13/cf:q/1731668690796/Put/seqid=0 2024-11-15T11:04:51,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742305_1481 (size=8256) 2024-11-15T11:04:51,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742305_1481 (size=8256) 2024-11-15T11:04:51,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742305_1481 (size=8256) 2024-11-15T11:04:51,091 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/.tmp/cf/dee3537e8f7040708890ddf223a54d3e 2024-11-15T11:04:51,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742306_1482 (size=5356) 2024-11-15T11:04:51,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/.tmp/cf/dee3537e8f7040708890ddf223a54d3e as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e 2024-11-15T11:04:51,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742306_1482 (size=5356) 2024-11-15T11:04:51,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742306_1482 (size=5356) 2024-11-15T11:04:51,107 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/.tmp/cf/63c91c6c59314b6da4f885b0f9101d0d 2024-11-15T11:04:51,111 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T11:04:51,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for b0790a154cd09f85d2547b019649c6df in 58ms, sequenceid=6, compaction requested=false 2024-11-15T11:04:51,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-15T11:04:51,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for b0790a154cd09f85d2547b019649c6df: 2024-11-15T11:04:51,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/.tmp/cf/63c91c6c59314b6da4f885b0f9101d0d as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d 2024-11-15T11:04:51,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T11:04:51,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:04:51,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e] hfiles 2024-11-15T11:04:51,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T11:04:51,125 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for cde4df773c8afc6c85f88e9dd9d6ef19 in 69ms, sequenceid=6, compaction requested=false 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for cde4df773c8afc6c85f88e9dd9d6ef19: 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d] hfiles 2024-11-15T11:04:51,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742307_1483 (size=121) 2024-11-15T11:04:51,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742307_1483 (size=121) 2024-11-15T11:04:51,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742307_1483 (size=121) 2024-11-15T11:04:51,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
2024-11-15T11:04:51,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-15T11:04:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-15T11:04:51,142 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:51,143 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df 2024-11-15T11:04:51,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b0790a154cd09f85d2547b019649c6df in 242 msec 2024-11-15T11:04:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742308_1484 (size=121) 2024-11-15T11:04:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742308_1484 (size=121) 2024-11-15T11:04:51,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742308_1484 (size=121) 2024-11-15T11:04:51,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 
2024-11-15T11:04:51,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/4cdab6d79b20:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-15T11:04:51,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-15T11:04:51,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:51,161 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:04:51,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=201 2024-11-15T11:04:51,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19 in 261 msec 2024-11-15T11:04:51,163 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T11:04:51,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T11:04:51,165 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T11:04:51,165 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T11:04:51,165 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,166 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742309_1485 (size=663) 2024-11-15T11:04:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742309_1485 (size=663) 2024-11-15T11:04:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742309_1485 (size=663) 2024-11-15T11:04:51,189 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T11:04:51,194 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T11:04:51,194 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,196 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T11:04:51,196 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-15T11:04:51,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 352 msec 2024-11-15T11:04:51,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T11:04:51,476 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T11:04:51,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476 2024-11-15T11:04:51,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37487, tgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476, rawTgtDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476, srcFsUri=hdfs://localhost:37487, srcDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:51,525 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37487, inputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7 2024-11-15T11:04:51,525 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins 
(auth:SIMPLE)]], outputRoot=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,527 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T11:04:51,534 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742310_1486 (size=198) 2024-11-15T11:04:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742310_1486 (size=198) 2024-11-15T11:04:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742310_1486 (size=198) 2024-11-15T11:04:51,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742311_1487 (size=663) 2024-11-15T11:04:51,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742311_1487 (size=663) 2024-11-15T11:04:51,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742311_1487 (size=663) 2024-11-15T11:04:51,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:51,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:51,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,567 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0009_000001 (auth:SIMPLE) from 127.0.0.1:55696 2024-11-15T11:04:52,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-9019050040992181045.jar 2024-11-15T11:04:52,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop-671883522118998498.jar 2024-11-15T11:04:52,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T11:04:52,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T11:04:52,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T11:04:52,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T11:04:52,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T11:04:52,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T11:04:52,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T11:04:52,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T11:04:52,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T11:04:52,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T11:04:52,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T11:04:52,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T11:04:52,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:52,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:52,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:52,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:52,831 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T11:04:52,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:52,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T11:04:52,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742312_1488 (size=131440) 2024-11-15T11:04:52,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742312_1488 (size=131440) 2024-11-15T11:04:52,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742312_1488 (size=131440) 2024-11-15T11:04:52,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742313_1489 (size=440656) 2024-11-15T11:04:52,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742313_1489 (size=440656) 2024-11-15T11:04:52,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742313_1489 (size=440656) 2024-11-15T11:04:52,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742314_1490 (size=4188619) 2024-11-15T11:04:52,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742314_1490 (size=4188619) 2024-11-15T11:04:52,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742314_1490 (size=4188619) 2024-11-15T11:04:52,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742315_1491 (size=1323991) 2024-11-15T11:04:52,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742315_1491 (size=1323991) 2024-11-15T11:04:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742315_1491 (size=1323991) 2024-11-15T11:04:53,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742316_1492 (size=903737) 2024-11-15T11:04:53,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742316_1492 (size=903737) 2024-11-15T11:04:53,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to 
blk_1073742316_1492 (size=903737) 2024-11-15T11:04:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742317_1493 (size=8360083) 2024-11-15T11:04:53,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742317_1493 (size=8360083) 2024-11-15T11:04:53,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742317_1493 (size=8360083) 2024-11-15T11:04:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T11:04:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T11:04:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T11:04:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742319_1495 (size=77835) 2024-11-15T11:04:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742319_1495 (size=77835) 2024-11-15T11:04:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742319_1495 (size=77835) 2024-11-15T11:04:53,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742320_1496 (size=30949) 2024-11-15T11:04:53,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742320_1496 (size=30949) 2024-11-15T11:04:53,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742320_1496 (size=30949) 2024-11-15T11:04:53,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742321_1497 (size=1597327) 2024-11-15T11:04:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742321_1497 (size=1597327) 2024-11-15T11:04:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742321_1497 (size=1597327) 2024-11-15T11:04:53,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742322_1498 (size=4695811) 2024-11-15T11:04:53,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742322_1498 (size=4695811) 2024-11-15T11:04:53,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742322_1498 (size=4695811) 2024-11-15T11:04:53,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742323_1499 (size=232957) 2024-11-15T11:04:53,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 
is added to blk_1073742323_1499 (size=232957) 2024-11-15T11:04:53,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742323_1499 (size=232957) 2024-11-15T11:04:53,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742324_1500 (size=127628) 2024-11-15T11:04:53,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742324_1500 (size=127628) 2024-11-15T11:04:53,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742324_1500 (size=127628) 2024-11-15T11:04:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742325_1501 (size=20406) 2024-11-15T11:04:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742325_1501 (size=20406) 2024-11-15T11:04:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742325_1501 (size=20406) 2024-11-15T11:04:53,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742326_1502 (size=5175431) 2024-11-15T11:04:53,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742326_1502 (size=5175431) 2024-11-15T11:04:53,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742326_1502 (size=5175431) 2024-11-15T11:04:53,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742327_1503 (size=217634) 2024-11-15T11:04:53,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742327_1503 (size=217634) 2024-11-15T11:04:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742327_1503 (size=217634) 2024-11-15T11:04:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742328_1504 (size=1832290) 2024-11-15T11:04:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742328_1504 (size=1832290) 2024-11-15T11:04:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742328_1504 (size=1832290) 2024-11-15T11:04:53,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742329_1505 (size=322274) 2024-11-15T11:04:53,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742329_1505 (size=322274) 2024-11-15T11:04:53,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742329_1505 (size=322274) 2024-11-15T11:04:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39395 is added to blk_1073742330_1506 (size=503880) 2024-11-15T11:04:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742330_1506 (size=503880) 2024-11-15T11:04:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742330_1506 (size=503880) 2024-11-15T11:04:53,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742331_1507 (size=6424745) 2024-11-15T11:04:53,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742331_1507 (size=6424745) 2024-11-15T11:04:53,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742331_1507 (size=6424745) 2024-11-15T11:04:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742332_1508 (size=29229) 2024-11-15T11:04:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742332_1508 (size=29229) 2024-11-15T11:04:53,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742332_1508 (size=29229) 2024-11-15T11:04:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742333_1509 (size=24096) 2024-11-15T11:04:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742333_1509 (size=24096) 2024-11-15T11:04:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742333_1509 (size=24096) 2024-11-15T11:04:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742334_1510 (size=111872) 2024-11-15T11:04:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742334_1510 (size=111872) 2024-11-15T11:04:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742334_1510 (size=111872) 2024-11-15T11:04:53,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742335_1511 (size=45609) 2024-11-15T11:04:53,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742335_1511 (size=45609) 2024-11-15T11:04:53,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742335_1511 (size=45609) 2024-11-15T11:04:53,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742336_1512 (size=136454) 2024-11-15T11:04:53,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742336_1512 (size=136454) 2024-11-15T11:04:53,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42399 is added to blk_1073742336_1512 (size=136454) 2024-11-15T11:04:53,258 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T11:04:53,260 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-15T11:04:53,262 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T11:04:53,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742337_1513 (size=366) 2024-11-15T11:04:53,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742337_1513 (size=366) 2024-11-15T11:04:53,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742337_1513 (size=366) 2024-11-15T11:04:53,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742338_1514 (size=15) 2024-11-15T11:04:53,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742338_1514 (size=15) 2024-11-15T11:04:53,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742338_1514 (size=15) 2024-11-15T11:04:53,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742339_1515 (size=303900) 2024-11-15T11:04:53,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742339_1515 (size=303900) 2024-11-15T11:04:53,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742339_1515 (size=303900) 2024-11-15T11:04:53,306 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T11:04:53,306 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T11:04:53,559 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0010_000001 (auth:SIMPLE) from 127.0.0.1:55712 2024-11-15T11:04:53,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:04:53,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-15T11:04:53,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-15T11:04:54,184 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:04:57,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000001/launch_container.sh] 2024-11-15T11:04:57,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000001/container_tokens] 2024-11-15T11:04:57,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0009/container_1731668480794_0009_01_000001/sysfs] 2024-11-15T11:04:58,993 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0010_000001 (auth:SIMPLE) from 127.0.0.1:38906 2024-11-15T11:04:59,090 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:04:59,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742340_1516 (size=349574) 2024-11-15T11:04:59,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742340_1516 (size=349574) 2024-11-15T11:04:59,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742340_1516 (size=349574) 2024-11-15T11:05:00,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 26514d1e32bf69240f59f1d352a7ad1d, had cached 0 bytes from a total of 5566 2024-11-15T11:05:00,372 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 62aaee921674ca29eb5bfbb30b16733a, had cached 0 bytes from a total of 8052 2024-11-15T11:05:01,254 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0010_000001 (auth:SIMPLE) from 127.0.0.1:32880 2024-11-15T11:05:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742341_1517 (size=8256) 2024-11-15T11:05:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742341_1517 (size=8256) 2024-11-15T11:05:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742341_1517 (size=8256) 2024-11-15T11:05:05,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742342_1518 (size=5356) 2024-11-15T11:05:05,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742342_1518 (size=5356) 2024-11-15T11:05:05,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742342_1518 (size=5356) 2024-11-15T11:05:05,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742343_1519 (size=17455) 2024-11-15T11:05:05,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742343_1519 (size=17455) 2024-11-15T11:05:05,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742343_1519 (size=17455) 2024-11-15T11:05:05,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742344_1520 (size=476) 2024-11-15T11:05:05,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742344_1520 (size=476) 2024-11-15T11:05:05,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742344_1520 (size=476) 2024-11-15T11:05:05,370 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000002/launch_container.sh] 2024-11-15T11:05:05,371 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000002/container_tokens] 2024-11-15T11:05:05,371 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_2/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000002/sysfs] 2024-11-15T11:05:05,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742345_1521 (size=17455) 2024-11-15T11:05:05,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742345_1521 (size=17455) 2024-11-15T11:05:05,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742345_1521 (size=17455) 2024-11-15T11:05:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742346_1522 (size=349574) 2024-11-15T11:05:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742346_1522 (size=349574) 2024-11-15T11:05:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742346_1522 (size=349574) 2024-11-15T11:05:05,440 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0010_000001 (auth:SIMPLE) from 127.0.0.1:32892 2024-11-15T11:05:07,447 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T11:05:07,447 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
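The entries above trace the ExportSnapshot step for snaptb0-testExportFileSystemStateWithSkipTmp: the source snapshot is verified, its manifest is copied to the target, dependency jars are staged into HDFS, and a MapReduce job copies the hfiles with skipTmp=true so the output lands directly under .hbase-snapshot/<name> instead of .hbase-snapshot/.tmp. A minimal sketch of driving the same export programmatically is below (Java); this is not how the test harness itself invokes it, the destination URI is a placeholder, and the single-dash flag spelling follows the commonly documented usage and may differ between HBase versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Skip the .hbase-snapshot/.tmp staging dir on the destination, matching the
    // skipTmp=true seen in the log ("snapshot.export.skip.tmp" is the conf key).
    conf.setBoolean("snapshot.export.skip.tmp", true);
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemStateWithSkipTmp \
    //     -copy-to hdfs://backup-cluster:8020/hbase-exports/export-1
    // (the destination URI is illustrative, not the one from this run)
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://backup-cluster:8020/hbase-exports/export-1"
    });
    System.exit(rc);
  }
}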
2024-11-15T11:05:07,452 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,452 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T11:05:07,453 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T11:05:07,453 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,454 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-15T11:05:07,454 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-15T11:05:07,454 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1561837080_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,454 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-15T11:05:07,454 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/export-test/export-1731668691476/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-15T11:05:07,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T11:05:07,462 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668707462"}]},"ts":"1731668707462"} 2024-11-15T11:05:07,464 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-15T11:05:07,464 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-15T11:05:07,465 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-15T11:05:07,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, UNASSIGN}] 2024-11-15T11:05:07,467 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, UNASSIGN 2024-11-15T11:05:07,467 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, UNASSIGN 2024-11-15T11:05:07,468 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=cde4df773c8afc6c85f88e9dd9d6ef19, regionState=CLOSING, regionLocation=4cdab6d79b20,39207,1731668474152 2024-11-15T11:05:07,468 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=b0790a154cd09f85d2547b019649c6df, regionState=CLOSING, regionLocation=4cdab6d79b20,45865,1731668473964 2024-11-15T11:05:07,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, UNASSIGN because future has completed 2024-11-15T11:05:07,469 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:05:07,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152}] 2024-11-15T11:05:07,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, UNASSIGN because future has completed 2024-11-15T11:05:07,470 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T11:05:07,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure b0790a154cd09f85d2547b019649c6df, 
server=4cdab6d79b20,45865,1731668473964}] 2024-11-15T11:05:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T11:05:07,621 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing cde4df773c8afc6c85f88e9dd9d6ef19, disabling compactions & flushes 2024-11-15T11:05:07,622 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. after waiting 0 ms 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:05:07,622 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close b0790a154cd09f85d2547b019649c6df 2024-11-15T11:05:07,622 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T11:05:07,623 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing b0790a154cd09f85d2547b019649c6df, disabling compactions & flushes 2024-11-15T11:05:07,623 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:05:07,623 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:05:07,623 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
after waiting 0 ms 2024-11-15T11:05:07,623 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 2024-11-15T11:05:07,627 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:05:07,627 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T11:05:07,628 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:07,628 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:07,628 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19. 2024-11-15T11:05:07,628 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for cde4df773c8afc6c85f88e9dd9d6ef19: Waiting for close lock at 1731668707622Running coprocessor pre-close hooks at 1731668707622Disabling compacts and flushes for region at 1731668707622Disabling writes for close at 1731668707622Writing region close event to WAL at 1731668707623 (+1 ms)Running coprocessor post-close hooks at 1731668707628 (+5 ms)Closed at 1731668707628 2024-11-15T11:05:07,628 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df. 
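The surrounding entries (from 11:05:07,459 onward) show the table being disabled: the master stores a DisableTableProcedure (pid=204), fans out a CloseTableRegionsProcedure with per-region UNASSIGN and CloseRegionProcedure children, and the client keeps polling "Checking to see if procedure is done". A minimal client-side sketch of the call that produces this procedure tree (Java; default connection setup, error handling omitted):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the master reports the DisableTableProcedure
      // (including its region-close sub-procedures) as SUCCESS, i.e. state=DISABLED.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}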
2024-11-15T11:05:07,628 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for b0790a154cd09f85d2547b019649c6df: Waiting for close lock at 1731668707623Running coprocessor pre-close hooks at 1731668707623Disabling compacts and flushes for region at 1731668707623Disabling writes for close at 1731668707623Writing region close event to WAL at 1731668707623Running coprocessor post-close hooks at 1731668707628 (+5 ms)Closed at 1731668707628 2024-11-15T11:05:07,629 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:05:07,630 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=cde4df773c8afc6c85f88e9dd9d6ef19, regionState=CLOSED 2024-11-15T11:05:07,630 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed b0790a154cd09f85d2547b019649c6df 2024-11-15T11:05:07,631 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=b0790a154cd09f85d2547b019649c6df, regionState=CLOSED 2024-11-15T11:05:07,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152 because future has completed 2024-11-15T11:05:07,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure b0790a154cd09f85d2547b019649c6df, server=4cdab6d79b20,45865,1731668473964 because future has completed 2024-11-15T11:05:07,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206 2024-11-15T11:05:07,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure cde4df773c8afc6c85f88e9dd9d6ef19, server=4cdab6d79b20,39207,1731668474152 in 163 msec 2024-11-15T11:05:07,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-11-15T11:05:07,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure b0790a154cd09f85d2547b019649c6df, server=4cdab6d79b20,45865,1731668473964 in 164 msec 2024-11-15T11:05:07,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=cde4df773c8afc6c85f88e9dd9d6ef19, UNASSIGN in 168 msec 2024-11-15T11:05:07,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=205 2024-11-15T11:05:07,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b0790a154cd09f85d2547b019649c6df, UNASSIGN in 169 msec 2024-11-15T11:05:07,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-15T11:05:07,639 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 172 msec 2024-11-15T11:05:07,640 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731668707640"}]},"ts":"1731668707640"} 2024-11-15T11:05:07,641 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-15T11:05:07,641 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-15T11:05:07,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 183 msec 2024-11-15T11:05:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T11:05:07,775 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T11:05:07,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,777 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,778 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,780 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39207 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,782 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:05:07,782 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df 2024-11-15T11:05:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,784 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/recovered.edits] 2024-11-15T11:05:07,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T11:05:07,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T11:05:07,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T11:05:07,784 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf, FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/recovered.edits] 2024-11-15T11:05:07,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:05:07,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T11:05:07,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-15T11:05:07,787 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:05:07,787 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:05:07,787 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:05:07,787 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T11:05:07,789 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d 2024-11-15T11:05:07,789 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e to 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/cf/dee3537e8f7040708890ddf223a54d3e 2024-11-15T11:05:07,791 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19/recovered.edits/9.seqid 2024-11-15T11:05:07,792 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/cde4df773c8afc6c85f88e9dd9d6ef19 2024-11-15T11:05:07,792 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/recovered.edits/9.seqid to hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df/recovered.edits/9.seqid 2024-11-15T11:05:07,792 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testtb-testExportFileSystemStateWithSkipTmp/b0790a154cd09f85d2547b019649c6df 2024-11-15T11:05:07,792 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-15T11:05:07,795 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,797 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-15T11:05:07,799 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-15T11:05:07,800 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,800 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
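The HFileArchiver entries above show the layout convention behind this phase of DeleteTableProcedure: every store file and recovered.edits file under data/<namespace>/<table>/<region>/... is moved to the same relative path under archive/data/... beneath the same test-data root, and the emptied region directory is then deleted. Below is a minimal sketch, in plain Java string handling rather than HBase's HFileArchiver, of deriving the archive-side location from a data-side path; the root and file names are copied from the log, the helper name is my own.

    /** Illustrative only: mirror a store-file path from data/ into archive/data/. */
    public final class ArchivePathSketch {

        // Derive the archive-side path for a file under <root>/data/..., as seen in the log above.
        static String toArchivePath(String root, String storeFilePath) {
            if (!storeFilePath.startsWith(root + "/data/")) {
                throw new IllegalArgumentException("not under " + root + "/data/: " + storeFilePath);
            }
            String relative = storeFilePath.substring(root.length()); // "/data/default/<table>/<region>/..."
            return root + "/archive" + relative;
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7";
            String hfile = root + "/data/default/testtb-testExportFileSystemStateWithSkipTmp/"
                + "cde4df773c8afc6c85f88e9dd9d6ef19/cf/63c91c6c59314b6da4f885b0f9101d0d";
            // Prints the same destination the HFileArchiver logged for this file.
            System.out.println(toArchivePath(root, hfile));
        }
    }
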
2024-11-15T11:05:07,800 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668707800"}]},"ts":"9223372036854775807"} 2024-11-15T11:05:07,800 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731668707800"}]},"ts":"9223372036854775807"} 2024-11-15T11:05:07,802 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T11:05:07,802 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cde4df773c8afc6c85f88e9dd9d6ef19, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731668688960.cde4df773c8afc6c85f88e9dd9d6ef19.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b0790a154cd09f85d2547b019649c6df, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731668688960.b0790a154cd09f85d2547b019649c6df.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T11:05:07,802 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-15T11:05:07,802 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731668707802"}]},"ts":"9223372036854775807"} 2024-11-15T11:05:07,804 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-15T11:05:07,805 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 29 msec 2024-11-15T11:05:07,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-15T11:05:07,895 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,895 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T11:05:07,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-15T11:05:07,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-15T11:05:07,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:07,925 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=809 (was 803) Potentially hanging thread: IPC Client (474028655) connection to localhost/127.0.0.1:33681 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7239 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:33542 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1561837080_22 at /127.0.0.1:41460 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 11207) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1110697642_1 at /127.0.0.1:33518 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1110697642_1 at /127.0.0.1:47960 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=438 (was 407) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2932 (was 3127) 2024-11-15T11:05:07,925 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-15T11:05:07,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-11-15T11:05:07,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bfaa534{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T11:05:07,935 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3daaeea9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:05:07,935 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:05:07,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40f9fdc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T11:05:07,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7821ccae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED} 2024-11-15T11:05:11,548 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731668480794_0010_000001 (auth:SIMPLE) from 127.0.0.1:53498 2024-11-15T11:05:11,566 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000001/launch_container.sh] 2024-11-15T11:05:11,566 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000001/container_tokens] 2024-11-15T11:05:11,566 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_346509673/yarn-7173798541/MiniMRCluster_346509673-localDir-nm-0_0/usercache/jenkins/appcache/application_1731668480794_0010/container_1731668480794_0010_01_000001/sysfs] 2024-11-15T11:05:11,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:05:13,147 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:05:13,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T11:05:15,779 WARN [regionserver/4cdab6d79b20:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 1 2024-11-15T11:05:19,089 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:05:24,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@487b2ed9{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T11:05:24,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a545a45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:05:24,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:05:24,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70de798d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T11:05:24,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e117a92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED} 2024-11-15T11:05:41,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
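The ResourceChecker report that closed a little earlier (Thread=809, was 803, followed by the "Potentially hanging thread" stack listings and the file-descriptor and load-average counters) is the part of this output usually mined when hunting thread leaks. A small, hedged sketch of how one might tally those entries from a captured console log like this one; the regex and the grouping rules are mine and assume the entries are joined onto long lines exactly as in this capture, not part of the test framework.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    /** Illustrative only: tally "Potentially hanging thread: <name>" entries in a console log. */
    public final class HangingThreadTally {

        // Thread name runs from the marker up to the first stack frame ("java.base@...") or end of input.
        private static final Pattern ENTRY =
            Pattern.compile("Potentially hanging thread: ([^\\n]+?)(?= java\\.base@|$)");

        public static void main(String[] args) throws IOException {
            String log = Files.readString(Path.of(args[0]));   // path to the captured console output
            Map<String, Integer> counts = new TreeMap<>();
            Matcher m = ENTRY.matcher(log);
            while (m.find()) {
                // Strip per-connection details so similar threads group together (e.g. DataXceiver entries).
                String name = m.group(1).replaceAll(" at /.*$", "").replaceAll(" #\\d+$", "").trim();
                counts.merge(name, 1, Integer::sum);
            }
            counts.forEach((name, n) -> System.out.println(n + "  " + name));
        }
    }
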
2024-11-15T11:05:41,959 ERROR [Thread[Thread-400,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T11:05:41,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68d6c8c{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-15T11:05:41,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2365d1eb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:05:41,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:05:41,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2096f1fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T11:05:41,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c6effca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED} 2024-11-15T11:05:41,964 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-11-15T11:05:41,969 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-15T11:05:41,969 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-15T11:05:41,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741830_1006 (size=969025) 2024-11-15T11:05:41,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741830_1006 (size=969025) 2024-11-15T11:05:41,974 ERROR [Thread[Thread-423,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T11:05:41,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dc7160a{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-15T11:05:41,979 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d7cc728{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T11:05:41,979 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T11:05:41,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@15d24492{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T11:05:41,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d74461e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED} 2024-11-15T11:05:41,981 ERROR [Thread[Thread-382,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T11:05:41,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-15T11:05:41,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T11:05:41,981 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T11:05:41,981 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:05:41,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,981 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T11:05:41,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T11:05:41,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=938583163, stopped=false 2024-11-15T11:05:41,982 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,982 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T11:05:41,982 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4cdab6d79b20,33465,1731668473053 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:05:41,984 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:05:41,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:05:41,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:05:41,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:05:41,985 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
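The call stack logged just above pins down where this whole shutdown originates: TestExportSnapshot.tearDownAfterClass invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then asks the master for a cluster shutdown. A minimal sketch of that teardown shape under JUnit 4 follows; the class names come from the stack trace, but the field name TEST_UTIL, startMiniCluster(3), and shutdownMiniMapReduceCluster() are assumptions carried over from the older HBaseTestingUtility API, so check them against the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    /** Sketch of the teardown shape implied by the call stacks above; not the actual test source. */
    public class MiniClusterLifecycleSketch {

        // Assumed field name; the tests keep one shared testing-util instance like this.
        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUpBeforeClass() throws Exception {
            TEST_UTIL.startMiniCluster(3);            // three region servers, matching RS:0..RS:2 in the log
        }

        @AfterClass
        public static void tearDownAfterClass() throws Exception {
            // Mirrors "Stopping mini mapreduce cluster..." followed by "Shutting down minicluster".
            TEST_UTIL.shutdownMiniMapReduceCluster(); // assumption: only needed if an MR cluster was started
            TEST_UTIL.shutdownMiniCluster();          // confirmed by the stack trace logged above
        }
    }
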
2024-11-15T11:05:41,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:05:41,985 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:05:41,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,986 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4cdab6d79b20,45865,1731668473964' ***** 2024-11-15T11:05:41,986 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T11:05:41,986 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,986 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:05:41,986 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'4cdab6d79b20,36113,1731668474097' ***** 2024-11-15T11:05:41,986 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,986 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:05:41,986 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:05:41,986 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:05:41,986 INFO [RS:0;4cdab6d79b20:45865 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:05:41,986 INFO [RS:1;4cdab6d79b20:36113 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:05:41,987 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:05:41,987 INFO [RS:1;4cdab6d79b20:36113 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:05:41,987 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:05:41,987 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(959): stopping server 4cdab6d79b20,36113,1731668474097 2024-11-15T11:05:41,987 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(3091): Received CLOSE for 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:05:41,987 INFO [RS:1;4cdab6d79b20:36113 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4cdab6d79b20:36113. 
2024-11-15T11:05:41,987 DEBUG [RS:1;4cdab6d79b20:36113 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:05:41,987 DEBUG [RS:1;4cdab6d79b20:36113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,987 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(976): stopping server 4cdab6d79b20,36113,1731668474097; all regions closed. 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(959): stopping server 4cdab6d79b20,45865,1731668473964 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4cdab6d79b20:45865. 
2024-11-15T11:05:41,987 DEBUG [RS:0;4cdab6d79b20:45865 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:05:41,987 DEBUG [RS:0;4cdab6d79b20:45865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,987 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T11:05:41,987 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1325): Online Regions={62aaee921674ca29eb5bfbb30b16733a=testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a.} 2024-11-15T11:05:41,988 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 62aaee921674ca29eb5bfbb30b16733a, disabling compactions & flushes 2024-11-15T11:05:41,988 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:05:41,988 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:05:41,988 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. after waiting 0 ms 2024-11-15T11:05:41,988 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
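The close sequence logged above for region 62aaee921674ca29eb5bfbb30b16733a (disable compactions and flushes, a time-limited wait for the close lock, acquire it, then disable updates) repeats for every region closed below. The following is a toy model of that "time limited wait for close lock" step using a plain ReadWriteLock; it is my own illustration of the pattern, not HBase's HRegion code, and the method names are invented.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Toy model: writers hold the read side of the close lock, close takes the write side. */
    public final class CloseLockSketch {

        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean closed;

        /** Ongoing writes hold the read lock so a close cannot start mid-operation. */
        public void withWriteInProgress(Runnable write) {
            closeLock.readLock().lock();
            try {
                if (closed) throw new IllegalStateException("region already closed");
                write.run();
            } finally {
                closeLock.readLock().unlock();
            }
        }

        /** Close acquires the write lock, but gives up after a bounded wait ("time limited wait"). */
        public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
            if (!closeLock.writeLock().tryLock(timeout, unit)) {
                return false;                 // could not quiesce writers in time
            }
            try {
                closed = true;                // corresponds to "Updates disabled for region ..."
                return true;
            } finally {
                closeLock.writeLock().unlock();
            }
        }
    }
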
2024-11-15T11:05:41,988 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4cdab6d79b20,39207,1731668474152' ***** 2024-11-15T11:05:41,988 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,988 DEBUG [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1351): Waiting on 62aaee921674ca29eb5bfbb30b16733a 2024-11-15T11:05:41,988 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T11:05:41,988 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(3091): Received CLOSE for 26514d1e32bf69240f59f1d352a7ad1d 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(3091): Received CLOSE for 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(959): stopping server 4cdab6d79b20,39207,1731668474152 2024-11-15T11:05:41,988 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;4cdab6d79b20:39207. 2024-11-15T11:05:41,989 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 26514d1e32bf69240f59f1d352a7ad1d, disabling compactions & flushes 2024-11-15T11:05:41,989 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 
2024-11-15T11:05:41,989 DEBUG [RS:2;4cdab6d79b20:39207 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T11:05:41,989 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:05:41,989 DEBUG [RS:2;4cdab6d79b20:39207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,989 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. after waiting 0 ms 2024-11-15T11:05:41,989 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T11:05:41,989 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-15T11:05:41,989 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1325): Online Regions={26514d1e32bf69240f59f1d352a7ad1d=testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d., 1588230740=hbase:meta,,1.1588230740, 28dfeddf0af1b0d2554866c3d938dd83=hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83.} 2024-11-15T11:05:41,989 DEBUG [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 26514d1e32bf69240f59f1d352a7ad1d, 28dfeddf0af1b0d2554866c3d938dd83 2024-11-15T11:05:41,990 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T11:05:41,990 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T11:05:41,990 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T11:05:41,990 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T11:05:41,990 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T11:05:41,990 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-15T11:05:41,991 INFO [regionserver/4cdab6d79b20:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:41,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741834_1010 (size=7859) 2024-11-15T11:05:41,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741834_1010 (size=7859) 2024-11-15T11:05:41,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741834_1010 (size=7859) 2024-11-15T11:05:41,995 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/62aaee921674ca29eb5bfbb30b16733a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:05:41,995 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,995 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 
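The WALSplitUtil lines above record the region's maximum sequence id by writing a marker file named <seqid>.seqid under the region's recovered.edits directory (here 8.seqid with newMaxSeqId=8). A small sketch of scanning such a directory for the highest recorded sequence id, using plain java.nio rather than HBase's WALSplitUtil; the directory layout is taken from the paths in the log, the parsing rules are mine.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.stream.Stream;

    /** Illustrative only: find the highest <n>.seqid marker under a recovered.edits directory. */
    public final class MaxSeqIdSketch {

        static long maxSeqId(Path recoveredEdits) throws IOException {
            try (Stream<Path> files = Files.list(recoveredEdits)) {
                return files.map(p -> p.getFileName().toString())
                            .filter(name -> name.endsWith(".seqid"))
                            .mapToLong(name ->
                                Long.parseLong(name.substring(0, name.length() - ".seqid".length())))
                            .max()
                            .orElse(-1L);     // no marker found
            }
        }

        public static void main(String[] args) throws IOException {
            // e.g. a local copy of .../data/default/testExportExpiredSnapshot/<region>/recovered.edits
            System.out.println(maxSeqId(Path.of(args[0])));
        }
    }
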
2024-11-15T11:05:41,995 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 62aaee921674ca29eb5bfbb30b16733a: Waiting for close lock at 1731668741987Running coprocessor pre-close hooks at 1731668741987Disabling compacts and flushes for region at 1731668741987Disabling writes for close at 1731668741988 (+1 ms)Writing region close event to WAL at 1731668741988Running coprocessor post-close hooks at 1731668741995 (+7 ms)Closed at 1731668741995 2024-11-15T11:05:41,996 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a. 2024-11-15T11:05:41,996 INFO [regionserver/4cdab6d79b20:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:41,996 DEBUG [RS:1;4cdab6d79b20:36113 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs 2024-11-15T11:05:41,996 INFO [RS:1;4cdab6d79b20:36113 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4cdab6d79b20%2C36113%2C1731668474097:(num 1731668475932) 2024-11-15T11:05:41,996 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/default/testExportExpiredSnapshot/26514d1e32bf69240f59f1d352a7ad1d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T11:05:41,996 DEBUG [RS:1;4cdab6d79b20:36113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:41,996 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:41,996 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.ChoreService(370): Chore service for: regionserver/4cdab6d79b20:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:41,997 INFO [regionserver/4cdab6d79b20:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:05:41,997 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 
2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 26514d1e32bf69240f59f1d352a7ad1d: Waiting for close lock at 1731668741988Running coprocessor pre-close hooks at 1731668741988Disabling compacts and flushes for region at 1731668741988Disabling writes for close at 1731668741989 (+1 ms)Writing region close event to WAL at 1731668741990 (+1 ms)Running coprocessor post-close hooks at 1731668741997 (+7 ms)Closed at 1731668741997 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1731668610016.26514d1e32bf69240f59f1d352a7ad1d. 2024-11-15T11:05:41,997 INFO [RS:1;4cdab6d79b20:36113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36113 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 28dfeddf0af1b0d2554866c3d938dd83, disabling compactions & flushes 2024-11-15T11:05:41,997 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. after waiting 0 ms 2024-11-15T11:05:41,997 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 
2024-11-15T11:05:41,997 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 28dfeddf0af1b0d2554866c3d938dd83 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-15T11:05:42,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:05:42,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4cdab6d79b20,36113,1731668474097 2024-11-15T11:05:42,001 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:05:42,004 INFO [regionserver/4cdab6d79b20:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:42,004 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4cdab6d79b20,36113,1731668474097] 2024-11-15T11:05:42,005 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4cdab6d79b20,36113,1731668474097 already deleted, retry=false 2024-11-15T11:05:42,005 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4cdab6d79b20,36113,1731668474097 expired; onlineServers=2 2024-11-15T11:05:42,021 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/.tmp/l/3c1c61609342477d9507cf26763a564d is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1731668608019/DeleteFamily/seqid=0 2024-11-15T11:05:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742347_1523 (size=5695) 2024-11-15T11:05:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742347_1523 (size=5695) 2024-11-15T11:05:42,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742347_1523 (size=5695) 2024-11-15T11:05:42,028 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/.tmp/l/3c1c61609342477d9507cf26763a564d 2024-11-15T11:05:42,028 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/info/479b94af79d6400e95fd66e48a699597 is 173, key is testExportExpiredSnapshot,1,1731668610016.62aaee921674ca29eb5bfbb30b16733a./info:regioninfo/1731668610397/Put/seqid=0 2024-11-15T11:05:42,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742348_1524 (size=14362) 2024-11-15T11:05:42,036 INFO 
[RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3c1c61609342477d9507cf26763a564d 2024-11-15T11:05:42,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742348_1524 (size=14362) 2024-11-15T11:05:42,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742348_1524 (size=14362) 2024-11-15T11:05:42,037 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/.tmp/l/3c1c61609342477d9507cf26763a564d as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/l/3c1c61609342477d9507cf26763a564d 2024-11-15T11:05:42,037 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/info/479b94af79d6400e95fd66e48a699597 2024-11-15T11:05:42,042 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3c1c61609342477d9507cf26763a564d 2024-11-15T11:05:42,042 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/l/3c1c61609342477d9507cf26763a564d, entries=12, sequenceid=27, filesize=5.6 K 2024-11-15T11:05:42,043 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 28dfeddf0af1b0d2554866c3d938dd83 in 46ms, sequenceid=27, compaction requested=false 2024-11-15T11:05:42,050 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/acl/28dfeddf0af1b0d2554866c3d938dd83/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-15T11:05:42,050 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:42,050 INFO [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 
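[Editor's note] In the flush above, the new store file is first written under the region's `.tmp` directory and only then committed into the column-family directory ("HRegionFileSystem(442): Committing ... /.tmp/l/3c1c... as ... /l/3c1c..."). The same "write to a temporary location, then move into place" idea can be sketched with plain `java.nio.file`; this is a generic illustration of the pattern, not HBase's HDFS-based implementation, and the local paths and file name below are stand-ins:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Generic "write to .tmp, then move into the store directory" commit, analogous in spirit
// to the "Committing .tmp/l/<file> as l/<file>" step in the log above. Local filesystem and
// invented paths are used purely for illustration.
public class TmpThenCommit {

    static Path writeAndCommit(Path storeDir, String fileName, byte[] contents) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Path familyDir = storeDir.resolve("l");              // column-family directory
        Files.createDirectories(tmpDir);
        Files.createDirectories(familyDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents);                      // finish writing the temp file first

        // Readers only ever see the fully written file; the move is atomic where the FS supports it.
        return Files.move(tmpFile, familyDir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("acl-region-store");
        Path committed = writeAndCommit(storeDir, "3c1c61609342477d9507cf26763a564d",
            "flushed cells".getBytes(StandardCharsets.UTF_8));
        System.out.println("Committed " + committed);
    }
}
```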
2024-11-15T11:05:42,050 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 28dfeddf0af1b0d2554866c3d938dd83: Waiting for close lock at 1731668741997Running coprocessor pre-close hooks at 1731668741997Disabling compacts and flushes for region at 1731668741997Disabling writes for close at 1731668741997Obtaining lock to block concurrent updates at 1731668741997Preparing flush snapshotting stores in 28dfeddf0af1b0d2554866c3d938dd83 at 1731668741997Finished memstore snapshotting hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1731668741998 (+1 ms)Flushing stores of hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. at 1731668741998Flushing 28dfeddf0af1b0d2554866c3d938dd83/l: creating writer at 1731668741998Flushing 28dfeddf0af1b0d2554866c3d938dd83/l: appending metadata at 1731668742021 (+23 ms)Flushing 28dfeddf0af1b0d2554866c3d938dd83/l: closing flushed file at 1731668742021Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12a4036a: reopening flushed file at 1731668742036 (+15 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 28dfeddf0af1b0d2554866c3d938dd83 in 46ms, sequenceid=27, compaction requested=false at 1731668742043 (+7 ms)Writing region close event to WAL at 1731668742043Running coprocessor post-close hooks at 1731668742050 (+7 ms)Closed at 1731668742050 2024-11-15T11:05:42,051 DEBUG [RS_CLOSE_REGION-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1731668476811.28dfeddf0af1b0d2554866c3d938dd83. 2024-11-15T11:05:42,058 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/ns/8d053e0391984d8a9a141fe7833e0394 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e./ns:/1731668608045/DeleteFamily/seqid=0 2024-11-15T11:05:42,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742349_1525 (size=7779) 2024-11-15T11:05:42,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742349_1525 (size=7779) 2024-11-15T11:05:42,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742349_1525 (size=7779) 2024-11-15T11:05:42,065 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/ns/8d053e0391984d8a9a141fe7833e0394 2024-11-15T11:05:42,083 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/rep_barrier/e94962a805d540e29e435739f3b2eabb is 133, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e./rep_barrier:/1731668608045/DeleteFamily/seqid=0 2024-11-15T11:05:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742350_1526 (size=8005) 2024-11-15T11:05:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742350_1526 (size=8005) 2024-11-15T11:05:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742350_1526 (size=8005) 2024-11-15T11:05:42,089 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/rep_barrier/e94962a805d540e29e435739f3b2eabb 2024-11-15T11:05:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36113-0x101ab9766630002, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,104 INFO [RS:1;4cdab6d79b20:36113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:05:42,105 INFO [RS:1;4cdab6d79b20:36113 {}] regionserver.HRegionServer(1031): Exiting; stopping=4cdab6d79b20,36113,1731668474097; zookeeper connection closed. 
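[Editor's note] The `NodeDeleted` events on `/hbase/rs/<server>` above are how the master-side tracker learns that a region server's ephemeral znode is gone and begins expiration handling. A minimal watcher for that kind of event, using the stock ZooKeeper client API; the quorum address and znode path are placeholders lifted from the log, and this is only a sketch, not the HBase RegionServerTracker:

```java
import java.io.IOException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch: watch one region server's ephemeral znode and report when it is deleted,
// which is the NodeDeleted event the trackers above react to. Not production code.
public class RsNodeWatcher implements Watcher {

    private static final String RS_ZNODE = "/hbase/rs/4cdab6d79b20,36113,1731668474097";

    private ZooKeeper zk;

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
            System.out.println("ephemeral node deleted: " + event.getPath());
            // A master-side tracker would start server-expiration handling here.
        } else {
            rewatch();   // ZooKeeper watches are one-shot, so re-register on other events
        }
    }

    private void rewatch() {
        try {
            zk.exists(RS_ZNODE, this);   // sets a one-time watch on the znode
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws IOException {
        RsNodeWatcher watcher = new RsNodeWatcher();
        watcher.zk = new ZooKeeper("127.0.0.1:55607", 30_000, watcher);
        watcher.rewatch();
        // ... keep the process alive while watching ...
    }
}
```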
2024-11-15T11:05:42,105 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9f4251b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9f4251b 2024-11-15T11:05:42,107 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/table/6474811e03234c77a24cab2f8f16c1b7 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731668589555.769c6b89fbcdc818cf21aa5f370a5f6e./table:/1731668608045/DeleteFamily/seqid=0 2024-11-15T11:05:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742351_1527 (size=8758) 2024-11-15T11:05:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742351_1527 (size=8758) 2024-11-15T11:05:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742351_1527 (size=8758) 2024-11-15T11:05:42,113 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/table/6474811e03234c77a24cab2f8f16c1b7 2024-11-15T11:05:42,117 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/info/479b94af79d6400e95fd66e48a699597 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/info/479b94af79d6400e95fd66e48a699597 2024-11-15T11:05:42,121 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/info/479b94af79d6400e95fd66e48a699597, entries=74, sequenceid=199, filesize=14.0 K 2024-11-15T11:05:42,122 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/ns/8d053e0391984d8a9a141fe7833e0394 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/ns/8d053e0391984d8a9a141fe7833e0394 2024-11-15T11:05:42,126 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/ns/8d053e0391984d8a9a141fe7833e0394, entries=23, sequenceid=199, filesize=7.6 K 2024-11-15T11:05:42,126 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/rep_barrier/e94962a805d540e29e435739f3b2eabb as 
hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/rep_barrier/e94962a805d540e29e435739f3b2eabb 2024-11-15T11:05:42,130 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/rep_barrier/e94962a805d540e29e435739f3b2eabb, entries=21, sequenceid=199, filesize=7.8 K 2024-11-15T11:05:42,131 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/.tmp/table/6474811e03234c77a24cab2f8f16c1b7 as hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/table/6474811e03234c77a24cab2f8f16c1b7 2024-11-15T11:05:42,134 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/table/6474811e03234c77a24cab2f8f16c1b7, entries=36, sequenceid=199, filesize=8.6 K 2024-11-15T11:05:42,135 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 145ms, sequenceid=199, compaction requested=false 2024-11-15T11:05:42,138 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-15T11:05:42,139 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:42,139 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:05:42,139 INFO [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T11:05:42,139 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731668741990Running coprocessor pre-close hooks at 1731668741990Disabling compacts and flushes for region at 1731668741990Disabling writes for close at 1731668741990Obtaining lock to block concurrent updates at 1731668741990Preparing flush snapshotting stores in 1588230740 at 1731668741990Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1731668741990Flushing stores of hbase:meta,,1.1588230740 at 1731668741991 (+1 ms)Flushing 1588230740/info: creating writer at 1731668741991Flushing 1588230740/info: appending metadata at 1731668742028 (+37 ms)Flushing 1588230740/info: closing flushed file at 1731668742028Flushing 1588230740/ns: creating writer at 1731668742042 (+14 ms)Flushing 1588230740/ns: appending metadata at 
1731668742058 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731668742058Flushing 1588230740/rep_barrier: creating writer at 1731668742069 (+11 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731668742083 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731668742083Flushing 1588230740/table: creating writer at 1731668742092 (+9 ms)Flushing 1588230740/table: appending metadata at 1731668742107 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731668742107Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43c59449: reopening flushed file at 1731668742117 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@edb205a: reopening flushed file at 1731668742121 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46b028ff: reopening flushed file at 1731668742126 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a4e5363: reopening flushed file at 1731668742130 (+4 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 145ms, sequenceid=199, compaction requested=false at 1731668742135 (+5 ms)Writing region close event to WAL at 1731668742136 (+1 ms)Running coprocessor post-close hooks at 1731668742139 (+3 ms)Closed at 1731668742139 2024-11-15T11:05:42,139 DEBUG [RS_CLOSE_META-regionserver/4cdab6d79b20:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T11:05:42,188 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(976): stopping server 4cdab6d79b20,45865,1731668473964; all regions closed. 2024-11-15T11:05:42,190 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(976): stopping server 4cdab6d79b20,39207,1731668474152; all regions closed. 
2024-11-15T11:05:42,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741835_1011 (size=11034) 2024-11-15T11:05:42,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741835_1011 (size=11034) 2024-11-15T11:05:42,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741835_1011 (size=11034) 2024-11-15T11:05:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741836_1012 (size=81723) 2024-11-15T11:05:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741836_1012 (size=81723) 2024-11-15T11:05:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741836_1012 (size=81723) 2024-11-15T11:05:42,194 DEBUG [RS:0;4cdab6d79b20:45865 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4cdab6d79b20%2C45865%2C1731668473964:(num 1731668475935) 2024-11-15T11:05:42,194 DEBUG [RS:0;4cdab6d79b20:45865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.ChoreService(370): Chore service for: regionserver/4cdab6d79b20:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T11:05:42,194 INFO [regionserver/4cdab6d79b20:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
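[Editor's note] Each region server above stops its periodic chores and drains its executors before shutting down RPC ("Shutdown chores and chore service", the ChoreService list, then "Shutdown executor service"). The usual JDK shape of that step, shown purely as a generic illustration (the pool and task names are invented; this is not the HBase ChoreService):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Generic "stop the periodic chores, then drain the executor" shutdown, analogous to the
// chore-service and executor shutdown lines above. Names and periods are invented.
public class ChoreShutdownSketch {

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

        chores.scheduleAtFixedRate(
            () -> System.out.println("CompactionThroughputTuner tick"), 0, 60, TimeUnit.SECONDS);
        chores.scheduleAtFixedRate(
            () -> System.out.println("ReplicationSourceStatistics tick"), 0, 300, TimeUnit.SECONDS);

        // ... server runs ...

        chores.shutdown();                                    // stop accepting new runs
        if (!chores.awaitTermination(30, TimeUnit.SECONDS)) { // give in-flight chores a grace period
            chores.shutdownNow();                             // then interrupt whatever is left
        }
        System.out.println("chore service stopped");
    }
}
```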
2024-11-15T11:05:42,194 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:05:42,195 INFO [RS:0;4cdab6d79b20:45865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45865 2024-11-15T11:05:42,196 DEBUG [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs 2024-11-15T11:05:42,196 INFO [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4cdab6d79b20%2C39207%2C1731668474152.meta:.meta(num 1731668476391) 2024-11-15T11:05:42,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:05:42,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4cdab6d79b20,45865,1731668473964 2024-11-15T11:05:42,197 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:05:42,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741833_1009 (size=18060) 2024-11-15T11:05:42,198 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4cdab6d79b20,45865,1731668473964] 2024-11-15T11:05:42,200 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4cdab6d79b20,45865,1731668473964 already deleted, retry=false 2024-11-15T11:05:42,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741833_1009 (size=18060) 2024-11-15T11:05:42,200 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4cdab6d79b20,45865,1731668473964 expired; onlineServers=1 2024-11-15T11:05:42,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073741833_1009 (size=18060) 2024-11-15T11:05:42,201 DEBUG [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/oldWALs 2024-11-15T11:05:42,201 INFO [RS:2;4cdab6d79b20:39207 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4cdab6d79b20%2C39207%2C1731668474152:(num 1731668475932) 2024-11-15T11:05:42,201 DEBUG [RS:2;4cdab6d79b20:39207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T11:05:42,201 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T11:05:42,202 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:05:42,202 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.ChoreService(370): Chore service for: regionserver/4cdab6d79b20:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T11:05:42,202 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:05:42,202 INFO [regionserver/4cdab6d79b20:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller 
exiting. 2024-11-15T11:05:42,202 INFO [RS:2;4cdab6d79b20:39207 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39207 2024-11-15T11:05:42,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4cdab6d79b20,39207,1731668474152 2024-11-15T11:05:42,204 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T11:05:42,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T11:05:42,207 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4cdab6d79b20,39207,1731668474152] 2024-11-15T11:05:42,209 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4cdab6d79b20,39207,1731668474152 already deleted, retry=false 2024-11-15T11:05:42,209 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4cdab6d79b20,39207,1731668474152 expired; onlineServers=0 2024-11-15T11:05:42,209 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4cdab6d79b20,33465,1731668473053' ***** 2024-11-15T11:05:42,209 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T11:05:42,209 INFO [M:0;4cdab6d79b20:33465 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T11:05:42,209 INFO [M:0;4cdab6d79b20:33465 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T11:05:42,209 DEBUG [M:0;4cdab6d79b20:33465 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T11:05:42,209 DEBUG [M:0;4cdab6d79b20:33465 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T11:05:42,209 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
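[Editor's note] The `OldWALsCleaner-0` warning above ("Interrupted while cleaning old WALs, will try to clean it next round. Exiting.") is the typical way a background cleaner reacts to being interrupted while the master shuts down. A generic, interrupt-aware worker loop of that shape, plain JDK with invented names (not the HBase LogCleaner):

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Generic interrupt-aware cleaner loop: block for work, exit cleanly when the owning
// service interrupts the thread during shutdown, mirroring the OldWALsCleaner warning above.
public class OldFilesCleanerSketch implements Runnable {

    private final BlockingQueue<String> toDelete = new LinkedBlockingQueue<>();

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            try {
                String file = toDelete.poll(1, TimeUnit.SECONDS);  // block briefly for work
                if (file != null) {
                    System.out.println("deleting " + file);
                }
            } catch (InterruptedException e) {
                System.out.println("Interrupted while cleaning, will try next round. Exiting.");
                Thread.currentThread().interrupt();                // restore the interrupt flag
                return;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Thread cleaner = new Thread(new OldFilesCleanerSketch(), "OldFilesCleaner-0");
        cleaner.start();
        Thread.sleep(1500);
        cleaner.interrupt();   // what the owning service does to its cleaners on shutdown
        cleaner.join();
    }
}
```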
2024-11-15T11:05:42,209 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.small.0-1731668475495 {}] cleaner.HFileCleaner(306): Exit Thread[master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.small.0-1731668475495,5,FailOnTimeoutGroup] 2024-11-15T11:05:42,209 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.large.0-1731668475482 {}] cleaner.HFileCleaner(306): Exit Thread[master/4cdab6d79b20:0:becomeActiveMaster-HFileCleaner.large.0-1731668475482,5,FailOnTimeoutGroup] 2024-11-15T11:05:42,210 INFO [M:0;4cdab6d79b20:33465 {}] hbase.ChoreService(370): Chore service for: master/4cdab6d79b20:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T11:05:42,210 INFO [M:0;4cdab6d79b20:33465 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T11:05:42,210 DEBUG [M:0;4cdab6d79b20:33465 {}] master.HMaster(1795): Stopping service threads 2024-11-15T11:05:42,210 INFO [M:0;4cdab6d79b20:33465 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T11:05:42,210 INFO [M:0;4cdab6d79b20:33465 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T11:05:42,211 INFO [M:0;4cdab6d79b20:33465 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T11:05:42,211 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T11:05:42,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T11:05:42,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T11:05:42,211 DEBUG [M:0;4cdab6d79b20:33465 {}] zookeeper.ZKUtil(347): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T11:05:42,211 WARN [M:0;4cdab6d79b20:33465 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T11:05:42,213 INFO [M:0;4cdab6d79b20:33465 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/.lastflushedseqids 2024-11-15T11:05:42,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073742352_1528 (size=320) 2024-11-15T11:05:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073742352_1528 (size=320) 2024-11-15T11:05:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39395 is added to blk_1073742352_1528 (size=320) 2024-11-15T11:05:42,226 INFO [M:0;4cdab6d79b20:33465 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T11:05:42,226 INFO [M:0;4cdab6d79b20:33465 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T11:05:42,227 DEBUG 
[M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T11:05:42,240 INFO [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:05:42,240 DEBUG [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:05:42,240 DEBUG [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T11:05:42,240 DEBUG [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T11:05:42,240 INFO [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=823.84 KB heapSize=988.61 KB 2024-11-15T11:05:42,241 ERROR [AsyncFSWAL-0-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:05:42,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x101ab9766630001, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,299 INFO [RS:0;4cdab6d79b20:45865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:05:42,299 INFO [RS:0;4cdab6d79b20:45865 {}] regionserver.HRegionServer(1031): Exiting; stopping=4cdab6d79b20,45865,1731668473964; zookeeper connection closed. 
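[Editor's note] The ERROR stack trace above shows a late append into the master-region WAL failing with a `NullPointerException` because `this.buf` in `FanOutOneBlockAsyncDFSOutput.buffered()` was already null, which suggests the output's buffer had been released by the time the shutdown-era flush reached it. As a toy illustration of that failure mode and of the kind of null guard that sidesteps it (invented code, not the HBase class or its fix):

```java
import java.nio.ByteBuffer;

// Toy reproduction of the race behind the stack trace above: an accessor dereferences a
// buffer field that close() has already released. Invented class, not the HBase output.
public class ReleasableOutput {

    private ByteBuffer buf = ByteBuffer.allocate(4096);

    public synchronized void close() {
        buf = null;                        // buffer released on close
    }

    // Unguarded version: throws NullPointerException if called after close().
    public synchronized int bufferedUnsafe() {
        return buf.remaining();
    }

    // Guarded version: report zero buffered bytes once the output is closed.
    public synchronized int buffered() {
        ByteBuffer b = buf;
        return b == null ? 0 : b.remaining();
    }

    public static void main(String[] args) {
        ReleasableOutput out = new ReleasableOutput();
        out.close();                       // shutdown wins the race
        System.out.println("guarded buffered() = " + out.buffered());
        try {
            out.bufferedUnsafe();          // late append path touching the released buffer
        } catch (NullPointerException e) {
            System.out.println("unguarded access fails: " + e);
        }
    }
}
```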
2024-11-15T11:05:42,300 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4501c5c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4501c5c0 2024-11-15T11:05:42,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39207-0x101ab9766630003, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T11:05:42,306 INFO [RS:2;4cdab6d79b20:39207 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T11:05:42,306 INFO [RS:2;4cdab6d79b20:39207 {}] regionserver.HRegionServer(1031): Exiting; stopping=4cdab6d79b20,39207,1731668474152; zookeeper connection closed. 2024-11-15T11:05:42,306 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e09720b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e09720b 2024-11-15T11:05:42,307 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-15T11:05:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T11:05:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T11:05:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-15T11:05:43,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-15T11:05:43,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:43,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T11:05:43,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T11:05:44,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42399 is added to blk_1073741830_1006 (size=969025) 2024-11-15T11:05:47,512 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:06:11,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:06:14,274 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-15T11:06:14,274 DEBUG [master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-15T11:06:21,960 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T11:06:41,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;4cdab6d79b20:33465 226 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@6d5b3081 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@454e36c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3360 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.CountDownLatch$Sync@11b693cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10521 Waited count: 10945 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) 
app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@12be4fe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26f28b5b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@12bf1e0a): State: TIMED_WAITING Blocked count: 0 Waited count: 666 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1896864382-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1896864382-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1896864382-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1896864382-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1896864382-41-acceptor-0@1f8d300d-ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1896864382-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1896864382-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1896864382-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-353d932d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 2867 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48863c90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37487): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3d21ac83): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4a337850): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 32831 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49f75576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37487): State: TIMED_WAITING Blocked count: 33 Waited count: 2029 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37487): State: TIMED_WAITING Blocked count: 56 Waited count: 2034 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37487): State: TIMED_WAITING Blocked count: 53 Waited count: 2023 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37487): State: TIMED_WAITING Blocked count: 48 Waited count: 2025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37487): State: TIMED_WAITING Blocked count: 43 Waited count: 2014 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@72cd0213): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@144b2c64): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7fba2a2b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@4eb4205e): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(578249369)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp2019510591-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp2019510591-87-acceptor-0@6d882a4b-ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp2019510591-88): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp2019510591-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-3659aa0d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@79d596d8): State: TIMED_WAITING Blocked count: 0 Waited count: 663 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37543): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 2 Waited count: 262 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c760868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1197 Waited count: 1294 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@58ec70): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 117 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1805055997-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1805055997-121): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1805055997-122-acceptor-0@217f5af0-ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (474028655) connection to localhost/127.0.0.1:37487 from jenkins): State: TIMED_WAITING Blocked count: 1156 Waited count: 1155 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1805055997-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 0 Waited count: 1792 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-18288a43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@548c575c): State: TIMED_WAITING Blocked count: 0 Waited count: 663 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34249): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 245 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1688bec8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1193 Waited count: 1298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@52349d10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446539563-157): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446539563-158-acceptor-0@3d120c0b-ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446539563-159): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446539563-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-318802a1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2)): State: TIMED_WAITING Blocked count: 19 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@fd967c9): State: TIMED_WAITING Blocked count: 0 Waited count: 662 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 43923): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@1c68a3b4[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@2dd091fe[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 0 Waited count: 276 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@493c73e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1172 Waited count: 1300 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2b4dee50): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 190 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@16ad484f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55607): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ced46cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:55607):): State: WAITING Blocked count: 1 Waited count: 433 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@93c578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 465 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8f5115b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 83 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7618cc0f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:55607)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48e1c434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1142a0c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@210a6e5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 84 Waited count: 331 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad24e2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 134 Waited count: 534 Waiting on java.util.concurrent.Semaphore$NonfairSync@7cd4ed27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465): State: WAITING Blocked count: 88 Waited count: 5853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78945e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22951332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@79737f92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2349ae9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d92d84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57145c49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 21 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;4cdab6d79b20:33465): State: TIMED_WAITING Blocked count: 12 Waited count: 2692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f1eb0fa5250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@336fa6f3): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3274 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 
(SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32678 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 34 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 20 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10bf1ab1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9249d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ad4b0d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6c2e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (LeaseRenewer:jenkins.hfs.2@localhost:37487): State: TIMED_WAITING Blocked count: 8 Waited count: 339 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 509 (LeaseRenewer:jenkins.hfs.0@localhost:37487): State: TIMED_WAITING Blocked count: 8 Waited count: 339 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32525 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 384 Waiting on java.util.concurrent.ForkJoinPool@18ac9e61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 557 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 587 (region-location-1): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4832f57a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1137 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1138 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1463 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@38f36e22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1771 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1820 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1821 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3173 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3174 (region-location-4): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4614 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8296 (AsyncFSWAL-1-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44d0a0cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8299 
(java.util.concurrent.ThreadPoolExecutor$Worker@537e6718[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8302 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-15T11:07:11,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-15T11:07:41,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;4cdab6d79b20:33465 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@6d5b3081 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@454e36c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3960 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.CountDownLatch$Sync@71e2609e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10521 Waited count: 10946 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) 
app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@12be4fe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26f28b5b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@12bf1e0a): State: TIMED_WAITING Blocked count: 0 Waited count: 786 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1896864382-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1896864382-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1896864382-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1896864382-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1896864382-41-acceptor-0@1f8d300d-ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1896864382-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1896864382-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1896864382-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-353d932d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 
(FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 2867 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48863c90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37487): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3d21ac83): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4a337850): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38796 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49f75576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37487): State: TIMED_WAITING Blocked count: 33 Waited count: 2091 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37487): State: TIMED_WAITING Blocked count: 56 Waited count: 2095 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37487): State: TIMED_WAITING Blocked count: 53 Waited count: 2084 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37487): State: TIMED_WAITING Blocked count: 48 Waited count: 2086 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37487): State: TIMED_WAITING Blocked count: 43 Waited count: 2076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@72cd0213): State: TIMED_WAITING Blocked count: 0 Waited count: 197 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@144b2c64): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7fba2a2b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@4eb4205e): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(578249369)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp2019510591-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp2019510591-87-acceptor-0@6d882a4b-ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp2019510591-88): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp2019510591-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-3659aa0d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@79d596d8): State: TIMED_WAITING Blocked count: 0 Waited count: 784 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37543): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 2 Waited count: 282 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c760868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1217 Waited count: 1334 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@58ec70): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 409 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 117 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1805055997-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1805055997-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1805055997-122-acceptor-0@217f5af0-ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (474028655) connection to localhost/127.0.0.1:37487 from jenkins): State: TIMED_WAITING Blocked count: 1216 Waited count: 1215 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1805055997-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 0 Waited count: 1852 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-18288a43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@548c575c): State: TIMED_WAITING Blocked count: 0 Waited count: 783 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34249): State: TIMED_WAITING Blocked count: 1 Waited count: 
41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 265 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1688bec8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1213 Waited count: 1338 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@52349d10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446539563-157): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446539563-158-acceptor-0@3d120c0b-ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446539563-159): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446539563-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-318802a1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2)): State: TIMED_WAITING Blocked count: 19 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@fd967c9): State: TIMED_WAITING Blocked count: 0 Waited count: 782 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 43923): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@1c68a3b4[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@2dd091fe[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 0 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@493c73e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1192 Waited count: 1340 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2b4dee50): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 190 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@16ad484f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55607): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ced46cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:55607):): State: WAITING Blocked count: 1 Waited count: 438 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@93c578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 470 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8f5115b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7618cc0f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 327 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:55607)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48e1c434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1142a0c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 83 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@210a6e5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 84 Waited count: 331 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad24e2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 134 Waited count: 534 Waiting on java.util.concurrent.Semaphore$NonfairSync@7cd4ed27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465): State: WAITING Blocked count: 88 Waited count: 5853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78945e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22951332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@79737f92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2349ae9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d92d84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57145c49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 21 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;4cdab6d79b20:33465): State: TIMED_WAITING Blocked count: 12 Waited count: 2692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f1eb0fa5250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@336fa6f3): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3874 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65326055 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38680 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 34 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 20 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10bf1ab1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9249d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ad4b0d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6c2e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38527 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 587 (region-location-1): State: WAITING Blocked count: 2 Waited count: 5 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 416 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4832f57a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1137 
(RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1138 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1463 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@38f36e22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1771 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1820 
(RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1821 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3173 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3174 (region-location-4): State: WAITING Blocked 
count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4614 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8296 (AsyncFSWAL-1-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44d0a0cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8302 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-15T11:08:11,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:08:41,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;4cdab6d79b20:33465 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@6d5b3081 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@454e36c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@30f54091 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10521 Waited count: 10947 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@12be4fe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26f28b5b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@12bf1e0a): State: TIMED_WAITING Blocked count: 0 Waited count: 906 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1896864382-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1896864382-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1896864382-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1896864382-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1896864382-41-acceptor-0@1f8d300d-ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1896864382-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1896864382-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1896864382-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-353d932d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 2867 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48863c90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37487): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3d21ac83): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4a337850): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44761 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49f75576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37487): State: TIMED_WAITING Blocked count: 33 Waited count: 2152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37487): State: TIMED_WAITING Blocked count: 56 Waited count: 2156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37487): State: TIMED_WAITING Blocked count: 53 Waited count: 2145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37487): State: TIMED_WAITING Blocked count: 48 Waited count: 2147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37487): State: TIMED_WAITING Blocked count: 43 Waited count: 2136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@72cd0213): State: TIMED_WAITING Blocked count: 0 Waited count: 227 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@144b2c64): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7fba2a2b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@4eb4205e): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(578249369)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp2019510591-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp2019510591-87-acceptor-0@6d882a4b-ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp2019510591-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp2019510591-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-3659aa0d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@79d596d8): State: TIMED_WAITING Blocked count: 0 Waited count: 904 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37543): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 2 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c760868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1237 Waited count: 1374 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@58ec70): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 480 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 117 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1805055997-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1805055997-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1805055997-122-acceptor-0@217f5af0-ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (474028655) connection to localhost/127.0.0.1:37487 from jenkins): State: TIMED_WAITING Blocked count: 1276 Waited count: 1275 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1805055997-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 0 Waited count: 1912 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-18288a43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@548c575c): State: TIMED_WAITING Blocked count: 0 Waited count: 903 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34249): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 285 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1688bec8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1233 Waited count: 1378 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@52349d10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446539563-157): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446539563-158-acceptor-0@3d120c0b-ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446539563-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446539563-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-318802a1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2)): State: TIMED_WAITING Blocked count: 19 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4)): State: TIMED_WAITING 
Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@fd967c9): State: TIMED_WAITING Blocked count: 0 Waited count: 902 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 43923): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@1c68a3b4[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@2dd091fe[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 0 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@493c73e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 
(BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1212 Waited count: 1380 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2b4dee50): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 190 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@16ad484f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55607): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ced46cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 
cport:55607):): State: WAITING Blocked count: 1 Waited count: 442 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@93c578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 474 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8f5115b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7618cc0f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 367 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:55607)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48e1c434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) 
Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1142a0c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@210a6e5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 84 Waited count: 331 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad24e2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 134 
Waited count: 534 Waiting on java.util.concurrent.Semaphore$NonfairSync@7cd4ed27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465): State: WAITING Blocked count: 88 Waited count: 5853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78945e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22951332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@79737f92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2349ae9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d92d84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57145c49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 21 Waited count: 4 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;4cdab6d79b20:33465): State: TIMED_WAITING Blocked count: 12 Waited count: 2692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f1eb0fa5250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@336fa6f3): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4474 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65326055 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44682 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 34 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 20 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10bf1ab1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9249d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ad4b0d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6c2e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44529 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-1): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4832f57a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1137 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1138 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1463 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@38f36e22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1771 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1820 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1821 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3173 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3174 (region-location-4): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4614 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8296 (AsyncFSWAL-1-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44d0a0cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8302 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-15T11:09:11,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-15T11:09:41,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;4cdab6d79b20:33465 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 45 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@6d5b3081 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12
(Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@454e36c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited 
count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@140f0be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10521 Waited count: 10948 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) 
app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@12be4fe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26f28b5b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@12bf1e0a): State: TIMED_WAITING Blocked count: 0 Waited count: 1027 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1896864382-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1896864382-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1896864382-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 
(qtp1896864382-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1896864382-41-acceptor-0@1f8d300d-ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1896864382-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1896864382-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1896864382-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-353d932d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 2867 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48863c90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37487): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3d21ac83): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4a337850): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50726 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49f75576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37487): 
State: TIMED_WAITING Blocked count: 33 Waited count: 2213 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37487): State: TIMED_WAITING Blocked count: 56 Waited count: 2217 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37487): State: TIMED_WAITING Blocked count: 53 Waited count: 2206 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37487): State: TIMED_WAITING Blocked count: 48 Waited count: 2208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37487): State: TIMED_WAITING Blocked count: 43 Waited count: 2197 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@72cd0213): State: TIMED_WAITING Blocked count: 0 Waited count: 257 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@144b2c64): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7fba2a2b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@4eb4205e): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(578249369)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp2019510591-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp2019510591-87-acceptor-0@6d882a4b-ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp2019510591-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp2019510591-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-3659aa0d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@79d596d8): State: TIMED_WAITING Blocked count: 0 Waited count: 1024 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37543): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 2 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c760868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1257 Waited count: 1414 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@58ec70): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 117 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1805055997-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1805055997-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1805055997-122-acceptor-0@217f5af0-ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (474028655) connection to localhost/127.0.0.1:37487 from jenkins): State: TIMED_WAITING Blocked count: 1336 Waited count: 1335 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1805055997-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 0 Waited count: 1972 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-18288a43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@548c575c): State: TIMED_WAITING Blocked count: 0 Waited count: 1023 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34249): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1688bec8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1253 Waited count: 1418 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@52349d10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446539563-157): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446539563-158-acceptor-0@3d120c0b-ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446539563-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446539563-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-318802a1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2)): State: TIMED_WAITING Blocked count: 19 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@fd967c9): State: TIMED_WAITING Blocked count: 0 Waited count: 1022 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 43923): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@1c68a3b4[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@2dd091fe[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 207 (Command processor): State: WAITING Blocked count: 0 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@493c73e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1232 Waited count: 1420 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2b4dee50): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 190 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@16ad484f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55607): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): 
State: WAITING Blocked count: 14 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ced46cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:55607):): State: WAITING Blocked count: 1 Waited count: 447 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@93c578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 479 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8f5115b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7618cc0f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:55607)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48e1c434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 
(NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1142a0c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@210a6e5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 84 Waited count: 331 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad24e2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 134 Waited count: 534 Waiting on java.util.concurrent.Semaphore$NonfairSync@7cd4ed27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465): State: WAITING Blocked count: 88 Waited count: 5853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78945e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22951332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@79737f92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2349ae9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d92d84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57145c49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0
Stack:
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 21 Waited count: 4
Stack:
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 289 (M:0;4cdab6d79b20:33465): State: TIMED_WAITING Blocked count: 12 Waited count: 2692
Stack:
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
  app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f1eb0fa5250.run(Unknown Source)
  app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713)
  app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935)
  app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876)
  app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735)
  app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709)
  app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700)
  app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862)
  app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672)
  app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627)
  app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610)
Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51
Stack:
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2
Stack:
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 363 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2
Stack:
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 365 (org.apache.hadoop.hdfs.PeerCache@336fa6f3): State: TIMED_WAITING Blocked count: 0 Waited count: 170
Stack:
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
  app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
  app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5074
Stack:
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180)
Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 92 Waited count: 2
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65326055 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50684 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 34 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 20 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10bf1ab1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9249d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ad4b0d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6c2e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50531 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-1): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4832f57a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1137 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1138 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1463 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@38f36e22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1771 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1820 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1821 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3173 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3174 (region-location-4): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4614 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8296 (AsyncFSWAL-1-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44d0a0cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8302 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-15T11:10:11,953 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:10:41,953 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T11:10:42,242 DEBUG [M:0;4cdab6d79b20:33465 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731668742226Disabling compacts and flushes for region at 1731668742226Disabling writes for close at 1731668742240 (+14 ms)Obtaining lock to block concurrent updates at 1731668742240Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731668742240Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=843610, getHeapSize=1012272, getOffHeapSize=0, getCellsCount=2221 at 1731668742241 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731669042242 (+300001 ms) 2024-11-15T11:10:42,242 WARN [M:0;4cdab6d79b20:33465 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-15T11:10:42,244 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T11:10:42,245 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T11:10:42,246 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T11:10:42,246 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 2024-11-15T11:10:42,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:10:42,249 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T11:10:42,249 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 2024-11-15T11:10:42,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;4cdab6d79b20:33465 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 45 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@6d5b3081 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@454e36c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5759 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@5a36a8a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10521 Waited count: 10949 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@12be4fe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26f28b5b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@12bf1e0a): State: TIMED_WAITING Blocked count: 0 Waited count: 1147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1896864382-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1896864382-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1896864382-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1896864382-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1896864382-41-acceptor-0@1f8d300d-ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:43319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1896864382-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1896864382-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1896864382-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-353d932d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 2867 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48863c90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37487): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3d21ac83): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4a337850): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56690 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49f75576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37487): State: TIMED_WAITING Blocked count: 33 Waited count: 2274 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37487): State: TIMED_WAITING Blocked count: 56 Waited count: 2278 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37487): State: TIMED_WAITING Blocked count: 53 Waited count: 2267 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37487): State: TIMED_WAITING Blocked count: 48 Waited count: 2269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37487): State: TIMED_WAITING Blocked count: 43 Waited count: 2258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@72cd0213): State: TIMED_WAITING Blocked count: 0 Waited count: 287 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@144b2c64): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7fba2a2b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@4eb4205e): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(578249369)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp2019510591-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp2019510591-87-acceptor-0@6d882a4b-ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:41363}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp2019510591-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp2019510591-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-3659aa0d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@79d596d8): State: TIMED_WAITING Blocked count: 0 Waited count: 1144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37543): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 2 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c760868 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1277 Waited count: 1454 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@58ec70): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 600 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37543): State: TIMED_WAITING Blocked count: 0 Waited count: 598 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 117 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1805055997-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1805055997-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1805055997-122-acceptor-0@217f5af0-ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:35651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (474028655) connection to localhost/127.0.0.1:37487 from jenkins): State: TIMED_WAITING Blocked count: 1396 Waited count: 1395 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1805055997-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 0 Waited count: 2032 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-18288a43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@548c575c): State: TIMED_WAITING Blocked count: 0 Waited count: 1143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34249): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 325 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1688bec8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1273 Waited count: 1458 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@52349d10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34249): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446539563-157): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1eb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446539563-158-acceptor-0@3d120c0b-ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:38679}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446539563-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446539563-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-318802a1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2)): State: 
TIMED_WAITING Blocked count: 19 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@fd967c9): State: TIMED_WAITING Blocked count: 0 Waited count: 1142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 43923): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@1c68a3b4[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@2dd091fe[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 0 Waited count: 356 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@493c73e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487): State: TIMED_WAITING Blocked count: 1252 Waited count: 1460 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2b4dee50): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 190 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 43923): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@16ad484f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55607): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ced46cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:55607):): State: WAITING Blocked count: 1 Waited count: 451 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@93c578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8f5115b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7618cc0f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:55607)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48e1c434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1142a0c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
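The NIOWorkerThread-* entries all wait on the same condition object (…@70c537e6): they are idle workers of a single thread pool, parked in LinkedBlockingQueue.take() inside ThreadPoolExecutor.getTask() until a task arrives, and they show plain WAITING rather than TIMED_WAITING because core threads of a fixed-size pool do not time out by default. The same shape can be reproduced with any fixed pool; the pool size, thread names, and class name below are made up for illustration.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Sketch: idle workers of a fixed thread pool park in LinkedBlockingQueue.take()
    // inside ThreadPoolExecutor.getTask(), reported as WAITING on the queue's
    // internal ConditionObject, the same shape as the NIOWorkerThread-* entries.
    public class IdleWorkerSketch {
      public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4, r -> {
          Thread t = new Thread(r, "worker-" + System.nanoTime());
          t.setDaemon(true);
          return t;
        });
        // Each execute() below starts one core worker; the trivial tasks finish
        // immediately, leaving all four workers idle in take().
        for (int i = 0; i < 4; i++) {
          pool.execute(() -> { });
        }
        Thread.sleep(200);
        Thread.getAllStackTraces().forEach((t, stack) -> {
          if (t.getName().startsWith("worker-")) {
            System.out.println(t.getName() + ": " + t.getState()); // WAITING
          }
        });
        pool.shutdown();
      }
    }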
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 85 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70c537e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@210a6e5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 84 Waited count: 331 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad24e2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 134 Waited count: 534 Waiting on java.util.concurrent.Semaphore$NonfairSync@7cd4ed27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33465): State: WAITING Blocked count: 88 Waited count: 5853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78945e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a9e876a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22951332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@79737f92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2349ae9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33465): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d92d84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57145c49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
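The RpcServer.*.FPBQ.Fifo.handler entries above (Threads 279 through 288) are idle HBase RPC handlers: each parks on a Semaphore inside FastPathRpcHandler.getCallRunner() waiting to be handed a call, which is why they show WAITING on a Semaphore$NonfairSync with LockSupport.park, AbstractQueuedSynchronizer.acquireSharedInterruptibly, and Semaphore.acquire at the top. A plain java.util.concurrent.Semaphore reproduces the same stack shape; the sketch below illustrates the JDK primitive only, not HBase's handler code, and its class and thread names are invented.

    import java.util.concurrent.Semaphore;

    // Sketch: a thread parked in Semaphore.acquire() is reported as WAITING on
    // the semaphore's NonfairSync (the default, non-fair mode), with the same
    // park / acquireSharedInterruptibly frames as the handler entries above.
    public class SemaphoreParkSketch {
      public static void main(String[] args) throws InterruptedException {
        Semaphore callAvailable = new Semaphore(0); // zero permits: acquire() blocks
        Thread handler = new Thread(() -> {
          try {
            callAvailable.acquire(); // parks until another thread releases a permit
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }, "handler-0");
        handler.start();
        Thread.sleep(100);
        System.out.println(handler.getName() + ": " + handler.getState()); // WAITING
        for (StackTraceElement frame : handler.getStackTrace()) {
          System.out.println("  " + frame);
        }
        callAvailable.release(); // hands the "call" to the handler
        handler.join();
      }
    }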
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 21 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;4cdab6d79b20:33465): State: TIMED_WAITING Blocked count: 12 Waited count: 2693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1333/0x00007f1eb11e5a08.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
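Thread 289 (M:0;4cdab6d79b20:33465) above is the HMaster's run() thread in the middle of shutdown: it is inside MasterRegion.close() and WALFactory.shutdown(), blocked with a bounded wait on a FutureTask until the WAL close task completes, hence TIMED_WAITING in FutureTask.awaitDone via LockSupport.parkNanos. The JDK side of that wait looks like the sketch below; the executor, the one-second timeout, and the sleeping task body are placeholders, not HBase's actual shutdown code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.FutureTask;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    // Sketch: a caller blocked in FutureTask.get(timeout, unit) shows
    // TIMED_WAITING with parkNanos / FutureTask.awaitDone frames, matching
    // Thread 289's wait on its WAL shutdown task. Values are illustrative.
    public class TimedGetSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        FutureTask<Void> shutdownTask = new FutureTask<>(() -> {
          Thread.sleep(5_000); // stands in for a slow close
          return null;
        });
        executor.execute(shutdownTask);
        try {
          shutdownTask.get(1, TimeUnit.SECONDS); // caller is TIMED_WAITING here
        } catch (TimeoutException e) {
          System.out.println("stand-in task did not finish within the timeout");
        } finally {
          shutdownTask.cancel(true);
          executor.shutdownNow();
        }
      }
    }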
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/4cdab6d79b20:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@336fa6f3): State: TIMED_WAITING Blocked count: 0 Waited count: 190 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5673 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 
Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65326055 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56685 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
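Entries such as SnapshotHandlerChoreCleaner and Idle-Rpc-Conn-Sweeper-pool-0 above are ScheduledThreadPoolExecutor workers between runs of a periodic task: they park in DelayedWorkQueue.take(), TIMED_WAITING while counting down to the next due run and plain WAITING when they have nothing due to wait for. A minimal scheduled pool reproduces the same frames; the pool name, the printed message, and the one-second period below are arbitrary choices for the sketch.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch: between runs of a periodic task, a ScheduledThreadPoolExecutor
    // worker parks in DelayedWorkQueue.take() (awaitNanos) and appears as
    // TIMED_WAITING, the same frames as the chore/statistics threads above.
    public class ScheduledIdleSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1,
            r -> new Thread(r, "chore-pool-0"));
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("chore tick"), 0, 1, TimeUnit.SECONDS);
        Thread.sleep(300); // first run is done; the worker now waits for the next due time
        Thread.getAllStackTraces().forEach((t, stack) -> {
          if (t.getName().equals("chore-pool-0")) {
            System.out.println(t.getName() + ": " + t.getState()); // TIMED_WAITING
            for (StackTraceElement frame : stack) {
              System.out.println("  " + frame);
            }
          }
        });
        chorePool.shutdown();
      }
    }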
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 34 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 20 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10bf1ab1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING 
Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9249d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ad4b0d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/4cdab6d79b20:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6c2e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 527 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 407 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 56532
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 529 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 531 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 587 (region-location-1):
  State: WAITING
  Blocked count: 2
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 588 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 987 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 434
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1050 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1090 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 62
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4832f57a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1137 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1138 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1463 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@38f36e22
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1771 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1820 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1821 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3173 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3174 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 4
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eedbed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4614 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4615 (RPCClient-NioEventLoopGroup-6-8):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4616 (RPCClient-NioEventLoopGroup-6-9):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8296 (AsyncFSWAL-1-hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData-prefix:4cdab6d79b20,33465,1731668473053):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44d0a0cb
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8302 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 27
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 8303 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8304 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1327/0x00007f1eb11ddf30.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-15T11:10:46,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-15T11:10:47,244 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-11-15T11:10:47,244 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-15T11:10:47,244 INFO [M:0;4cdab6d79b20:33465 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-15T11:10:47,245 INFO [M:0;4cdab6d79b20:33465 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33465
2024-11-15T11:10:47,245 INFO [M:0;4cdab6d79b20:33465 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-15T11:10:47,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37487/user/jenkins/test-data/0f38b161-48f7-7e39-38a3-97288bb2bdb7/MasterData/WALs/4cdab6d79b20,33465,1731668473053/4cdab6d79b20%2C33465%2C1731668473053.1731668474693
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-11-15T11:10:47,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T11:10:47,347 INFO [M:0;4cdab6d79b20:33465 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-15T11:10:47,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33465-0x101ab9766630000, quorum=127.0.0.1:55607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T11:10:47,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@469f0591{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:10:47,351 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42a22cec{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:10:47,351 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:10:47,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76d904c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:10:47,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19d73582{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED}
2024-11-15T11:10:47,353 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T11:10:47,353 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T11:10:47,353 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T11:10:47,353 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1221769701-172.17.0.2-1731668468836 (Datanode Uuid 80e11d40-360e-46ae-a031-e0db59912a7f) service to localhost/127.0.0.1:37487
2024-11-15T11:10:47,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data5/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data6/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,355 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T11:10:47,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53c777d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:10:47,359 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7026df51{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:10:47,359 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:10:47,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1524f378{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:10:47,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35cd15ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED}
2024-11-15T11:10:47,360 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T11:10:47,360 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T11:10:47,360 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T11:10:47,360 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1221769701-172.17.0.2-1731668468836 (Datanode Uuid a15cdfc4-77ba-4018-a9bb-fe1eae10158c) service to localhost/127.0.0.1:37487
2024-11-15T11:10:47,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data3/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data4/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,361 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T11:10:47,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13c554f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T11:10:47,365 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b85ed6e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:10:47,365 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:10:47,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3177f68b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:10:47,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52f556e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED}
2024-11-15T11:10:47,366 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T11:10:47,366 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T11:10:47,366 WARN [BP-1221769701-172.17.0.2-1731668468836 heartbeating to localhost/127.0.0.1:37487 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1221769701-172.17.0.2-1731668468836 (Datanode Uuid 2ecd9689-13bb-4b14-ae24-35c85d3af906) service to localhost/127.0.0.1:37487
2024-11-15T11:10:47,366 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T11:10:47,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data1/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/cluster_8dbbdebe-078b-b6be-f21f-911a7ec2bf95/data/data2/current/BP-1221769701-172.17.0.2-1731668468836 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T11:10:47,367 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T11:10:47,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@397082b1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T11:10:47,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5402212{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T11:10:47,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T11:10:47,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50f4ffa2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T11:10:47,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@650dba7d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/63bea719-379f-ada0-9a51-1589803c3009/hadoop.log.dir/,STOPPED}
2024-11-15T11:10:47,385 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-15T11:10:47,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down